filename | text
---|---
the-stack_0_21818 | # -*- coding: utf-8 -*-
import cv2
import sys
# Get user supplied values
imagePath = sys.argv[1]
cascPath = "haarcascade_frontalface_default.xml"
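# Example invocation (illustrative; assumes this script is saved as face_detect.py and that
# haarcascade_frontalface_default.xml is available in the working directory):
#   python face_detect.py family_photo.jpg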
# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)
# Read the image
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.1,
    minNeighbors=5,
    minSize=(30, 30)
    #flags = cv2.CV_HAAR_SCALE_IMAGE
)
print("Found {0} faces!".format(len(faces)))
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow("Faces found", image)
cv2.waitKey(0) |
the-stack_0_21819 | # (C) Copyright Artificial Brain 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DEVICE = 'device'
SIMULATOR = 'simulator'
DEFAULT_SIMULATOR = 'qasm_simulator'
STATEVECTOR_SIMULATOR = 'statevector_simulator'
PARAMS = 'params'
BINARY = 'binary'
DECIMAL = 'decimal'
ANU_QRNG_URL = 'https://qrng.anu.edu.au/API/jsonI.php'
HEX_TYPE = 'hex16'
UNIT16_TYPE = 'uint16'
DEFAULT_REPETITIONS = 1024
DEFAULT_COLOR = '#B3365B'
COLORS = ['#B3365B', '#F46526', '#EFCBCA']
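# Illustrative note (not part of the original file): ANU_QRNG_URL points at the ANU quantum
# random number API; a typical request built from these constants might look like
#   requests.get(ANU_QRNG_URL, params={'length': 1, 'type': UNIT16_TYPE})
# which returns JSON containing random numbers in the requested format.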
|
the-stack_0_21821 | # -*- coding: utf-8 -*-
"""
a805d4bd
This module fixes a bug with pickling and relative imports in Python < 2.6.
The problem is with pickling an e.g. `exceptions.KeyError` instance.
As SQLAlchemy has its own `exceptions` module, pickle will try to
lookup :exc:`KeyError` in the wrong module, resulting in this exception::
cPickle.PicklingError: Can't pickle <type 'exceptions.KeyError'>:
attribute lookup exceptions.KeyError failed
doing `import exceptions` just before the dump in `sqlalchemy.types`
reveals the source of the bug::
EXCEPTIONS: <module 'sqlalchemy.exc' from '/var/lib/hudson/jobs/celery/
workspace/buildenv/lib/python2.5/site-packages/sqlalchemy/exc.pyc'>
Hence the random module name "a805d4bd" is taken to decrease the chances of
a collision.
"""
from __future__ import absolute_import
from sqlalchemy.types import PickleType as _PickleType
class PickleType(_PickleType):  # pragma: no cover

    def bind_processor(self, dialect):
        impl_processor = self.impl.bind_processor(dialect)
        dumps = self.pickler.dumps
        protocol = self.protocol
        if impl_processor:
            def process(value):
                if value is not None:
                    value = dumps(value, protocol)
                return impl_processor(value)
        else:
            def process(value):  # noqa
                if value is not None:
                    value = dumps(value, protocol)
                return value
        return process

    def result_processor(self, dialect, coltype):
        impl_processor = self.impl.result_processor(dialect, coltype)
        loads = self.pickler.loads
        if impl_processor:
            def process(value):
                value = impl_processor(value)
                if value is not None:
                    return loads(value)
        else:
            def process(value):  # noqa
                if value is not None:
                    return loads(value)
        return process

    def copy_value(self, value):
        if self.mutable:
            return self.pickler.loads(self.pickler.dumps(value, self.protocol))
        else:
            return value
|
the-stack_0_21823 | from die import Die
import pygal
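# The ``die`` module is not included in this row; a minimal sketch of the Die class it is
# assumed to provide, based on how it is used below (a num_sides attribute and a roll() method):
#
#   from random import randint
#
#   class Die:
#       def __init__(self, num_sides=6):
#           self.num_sides = num_sides
#
#       def roll(self):
#           return randint(1, self.num_sides)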
# Create a D6
die = Die()
# Roll the die several times and store the results in a list
results = []
for roll_num in range(1000):
    result = die.roll()
    results.append(result)
# Analyze the results
frequencies = []
for value in range(1, die.num_sides+1):
    frequency = results.count(value)
    frequencies.append(frequency)
# Visualize the results
hist = pygal.Bar()
hist.title = "Results of rolling one D6 1000 times"
hist.x_labels = ['1','2','3','4','5','6']
hist.x_title = "Result"
hist.y_title = "Frequency of Result"
hist.add('D6',frequencies)
hist.render_to_file("F:/die_visual.svg") |
the-stack_0_21824 | #!/usr/bin/env python3
import os
from flask import Flask, render_template,request,redirect,url_for,jsonify,make_response
from flask_login import LoginManager, login_required, current_user,login_user,logout_user
from flask_babel import Babel,gettext
from flask_sqlalchemy import SQLAlchemy
import config
import datetime
from config import AUTH, MSG_INFO,MSG_OK,MSG_KO
# to enable CORS
from flask_cors import CORS, cross_origin
LANGUAGES = {
'en': 'English',
'it': 'Italian'
}
current_language = None
app = Flask(__name__)
# to enable CORS
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# end of CORS setup
app.secret_key = os.urandom(24)
# flask-login initialization
login_manager = LoginManager()
login_manager.init_app(app)
# flask-babel initialization
babel = Babel(app=app)
_ = gettext
# flask-sqlalchemy initialization
app.config['SQLALCHEMY_DATABASE_URI'] = config.SQLALCHEMY_DATABASE_URI
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
config.db = db
config.app = app
import user
import votation_dao
# import candidate
#import backend
import option_dao
import judgement_dao
import vote_dao
import vote_maj_jud
import vote_simple
import voter_dao
import voter_bo
import votation_bo
from model import Judgement, Votation
if config.AUTH == 'ldap':
import auth_ldap as auth
if config.AUTH == 'google':
import auth_google as auth
if config.AUTH == 'superauth':
import auth_superauth as auth
if config.AUTH == 'auth0':
import auth_auth0 as auth
if config.AUTH == 'test':
import auth_test as auth
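# Whichever backend is selected above, the rest of this module relies only on a small shared
# interface: each auth module exposes LOGIN_TEMPLATE, CLIENT_ID, get_auth_data(...),
# auth(...) returning a dict with 'logged_in', 'username' and 'message', and logout_action().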
@babel.localeselector
def get_locale():
# return 'it'
if current_language:
return current_language
return request.accept_languages.best_match(LANGUAGES.keys())
@login_manager.user_loader
def load_user(user_name):
u = user.User(user_name)
if u.is_valid():
return u
return None
@app.route("/")
@login_required
def index():
return render_template('index_template.html', pagetitle=_("Main menu"))
@app.route("/credits")
def credits():
return render_template('docs/credits.html', pagetitle=_("Credits"))
@app.route("/terms-and-conditions")
def termsandconditions():
return render_template('docs/terms-and-conditions.html', pagetitle=_("Credits"))
@app.route("/login", methods=['GET',])
def login():
message = None
return render_template(auth.LOGIN_TEMPLATE, pagetitle=_("Spritz, Online Voting System"),message=message, CLIENT_ID=auth.CLIENT_ID)
@app.route("/login_test_callback", methods=['POST',])
def login_test_callback():
message = None
auth_data = auth.get_auth_data(request)
auth_result = auth.auth(auth_data)
if auth_result['logged_in']:
u = user.User(auth_result['username'])
login_user(u)
message = (auth_result['message'],MSG_OK)
return render_template(auth.LOGIN_TEMPLATE, pagetitle=_("Login successful"),message=message)
else:
message = (auth_result['message'],MSG_KO)
return render_template(auth.LOGIN_TEMPLATE, pagetitle=_("Login unsuccessful"),message=message)
@app.route("/login_auth0")
def login_auth0():
return auth.get_auth_data() # is a request to redirect to callback_url
@app.route("/superauthcallback")
def superauth_callback():
message = None
auth_data = auth.get_auth_data(request)
auth_result = auth.auth(auth_data)
if auth_result['logged_in']:
u = user.User(auth_result['username'])
login_user(u)
message = (auth_result['message'],MSG_OK)
else:
message = (auth_result['message'],MSG_KO)
return render_template(auth.LOGIN_TEMPLATE, pagetitle=_("Login result"),message=message)
@app.route('/auth0_callback_url')
def auth0_callback_handling():
message = None
auth_result = auth.auth()
if auth_result['logged_in']:
u = user.User(auth_result['username'])
login_user(u)
message = (auth_result['message'],MSG_OK)
return redirect(url_for('votation_list'))
else:
message = (auth_result['message'],MSG_KO)
return redirect(url_for('login'))
@app.route("/logout")
@login_required
def logout():
logout_user()
#return render_template('logout_template.html', pagetitle="Logout")
return auth.logout_action()
@app.route("/votation_propose", methods=['GET', 'POST'])
@login_required
def votation_propose():
v = Votation()
message = (_("Please, insert data"),MSG_INFO)
if request.method == 'POST':
#v.votation_id = request.form['votation_id']
v.votation_description = request.form['votation_description']
v.description_url = request.form['description_url']
v.begin_date = request.form['utc_begin_date']
v.end_date = request.form['utc_end_date']
v.votation_type = request.form['votation_type']
v.list_voters = 0
if 'list_voters' in request.form.keys():
v.list_voters = request.form['list_voters']
v.promoter_user_id = current_user.u.user_id
if v.votation_type == votation_dao.TYPE_DRAW:
v.votation_status = votation_dao.STATUS_WAIT_FOR_CAND_AND_GUAR
else:
v.votation_status = votation_dao.STATUS_VOTING
message = votation_bo.insert_votation_with_options(v, request.form['votation_options'], request.form['votation_juds'])
return render_template('votation_propose_template.html', pagetitle=_("New election"), \
votation_obj=v, message=message,utcnow=str(datetime.datetime.utcnow()) )
@app.route("/votation_list")
@login_required
def votation_list():
votations_array = votation_dao.load_votations()
votations_array.reverse()
return render_template('votation_list_template.html', pagetitle=_("Election list"), \
votations_array=votations_array,states=votation_dao.states,type_description=votation_dao.TYPE_DESCRIPTION)
# @app.route("/be_a_candidate/<int:votation_id>")
# @login_required
# def be_a_candidate(votation_id):
# v = votation_dao.load_votation_by_id(votation_id)
# return render_template('be_a_candidate_template.html', pagetitle="Candidatura", v=v)
# @app.route("/be_a_candidate_confirm")
# @login_required
# def be_a_candidate_confirm():
# votation_id = int(request.args.get('votation_id'))
# v = votation_dao.load_votation_by_id(votation_id)
# message = ("Ora sei un candidato",MSG_OK)
# o = candidate.candidate_dto()
# app.logger.info(o)
# o.votation_id = votation_id
# o.u.user_id = current_user.u.user_id
# o.passphrase_ok = 0
# error = candidate.validate_dto(o)
# if error == 0:
# candidate.insert_dto(o)
# else:
# message = (candidate.error_messages[error] + ": " + v.votation_description,MSG_KO )
# return render_template('be_a_candidate_confirm_template.html', pagetitle="Conferma candidatura", \
# v=v,message=message)
@app.route("/votation_detail/<int:votation_id>")
@login_required
def votation_detail(votation_id):
v = votation_dao.load_votation_by_id(votation_id)
options_array = option_dao.load_options_by_votation(v.votation_id)
voters_array = None
if v.list_voters:
voters_array = voter_dao.load_voters_list(votation_id)
if v.votation_type == votation_dao.TYPE_MAJORITY_JUDGMENT:
return votation_detail_maj_jud(v,options_array, voters_array)
# if v.votation_type == votation_dao.TYPE_DRAW:
# return votation_detail_draw(v)
if v.votation_type == votation_dao.TYPE_SIMPLE_MAJORITY:
return votation_detail_simple(v, options_array, voters_array)
if v.votation_type == votation_dao.TYPE_LIST_RAND:
return votation_detail_list_rand(v, options_array, voters_array)
# def votation_detail_draw(v):
# candidates_array = None
# counting = None
# candidates_array = candidate.load_candidate_by_votation(v.votation_id)
# # if v.votation_status > votation_dao.STATUS_WAIT_FOR_CAND_AND_GUAR:
# # state_array = backend.election_state(votation_id)
# # else:
# # state_array = []
# return render_template('draw/votation_detail_template.html', pagetitle="Election details", \
# v=v, candidates_array=candidates_array, \
# states=votation_dao.states, \
# count_voters=voter_dao.count_voters(v.votation_id), \
# count_votes=vote_dao.count_votes(v.votation_id), \
# votation_timing=votation_dao.votation_timing(v),counting=counting, \
# words=votation_dao.WORDS, type_description=votation_dao.TYPE_DESCRIPTION)
def votation_detail_maj_jud(v, options_array, voters_array):
juds_array = judgement_dao.load_judgement_by_votation(v.votation_id)
counting = None
is_voter = voter_dao.is_voter(v.votation_id, current_user.u.user_id)
if v.votation_status == votation_dao.STATUS_ENDED:
counting = vote_maj_jud.votation_counting(v)
return render_template('majority_jud/votation_detail_template.html', pagetitle=_("Election details"), \
v=v, \
states=votation_dao.states, options_array=options_array,juds_array=juds_array, \
count_voters=voter_dao.count_voters(v.votation_id), \
count_votes=vote_dao.count_votes(v.votation_id), \
votation_timing=votation_dao.votation_timing(v),counting=counting, \
type_description=votation_dao.TYPE_DESCRIPTION, \
is_voter=is_voter, voters_array=voters_array)
def votation_detail_simple(v, options_array, voters_array):
counting = None
is_voter = voter_dao.is_voter(v.votation_id, current_user.u.user_id)
if v.votation_status == votation_dao.STATUS_ENDED:
counting = vote_simple.counting_votes(v.votation_id)
return render_template('simple_majority/votation_detail_template.html', pagetitle=_("Election details"), \
v=v, \
states=votation_dao.states, options_array=options_array, \
count_voters=voter_dao.count_voters(v.votation_id), \
count_votes=vote_dao.count_votes(v.votation_id), \
votation_timing=votation_dao.votation_timing(v),counting=counting, \
type_description=votation_dao.TYPE_DESCRIPTION, \
is_voter=is_voter, voters_array=voters_array)
def votation_detail_list_rand(v, options_array,voters_array):
import vote_list_rand
juds_array = judgement_dao.load_judgement_by_votation(v.votation_id)
counting = None
randomized_list = None
is_voter = voter_dao.is_voter(v.votation_id, current_user.u.user_id)
if v.votation_status == votation_dao.STATUS_ENDED:
counting = vote_maj_jud.votation_counting(v)
randomized_list = vote_list_rand.randomized_list(v,options_array)
return render_template('list_rand/votation_detail_template.html', pagetitle=_("Election details"), \
v=v, \
states=votation_dao.states, options_array=options_array,juds_array=juds_array, \
count_voters=voter_dao.count_voters(v.votation_id), \
count_votes=vote_dao.count_votes(v.votation_id), \
votation_timing=votation_dao.votation_timing(v),counting=counting, \
type_description=votation_dao.TYPE_DESCRIPTION, \
is_voter=is_voter, voters_array=voters_array, randomized_list=randomized_list)
@app.route("/close_election/<int:votation_id>")
@login_required
def close_election(votation_id):
#v = votation_dao.load_votation_by_id(votation_id)
#votation_dao.update_status(votation_id,votation_dao.STATUS_ENDED)
votation_bo.set_votation_status_ended(votation_id)
return render_template('thank_you_template.html', votation_id=votation_id, \
pagetitle=_("Election closed"), \
message=(_("Election closed, please, check results"),MSG_OK))
@app.route("/delete_election/<int:votation_id>")
@login_required
def delete_election(votation_id):
if request.args.get('confirm') == "yes":
votation_bo.deltree_votation_by_id(votation_id)
return render_template('thank_you_template.html', votation_id=0, \
pagetitle=_("Delete"), \
message=(_("Election deleted"),MSG_OK))
else:
return render_template('confirmation_template.html', \
pagetitle=_("Delete"), \
message=None,votation_id=votation_id)
@app.route("/add_voters", methods=["POST",])
@login_required
def add_voters():
votation_id = int(request.form['votation_id'])
v = votation_dao.load_votation_by_id(votation_id)
if v.promoter_user.user_id == current_user.u.user_id:
list_voters = request.form['list_voters']
ar = voter_dao.split_string_remove_dup(list_voters)
n = voter_bo.insert_voters_array(votation_id,ar)
return render_template('thank_you_template.html', votation_id=votation_id, \
pagetitle=_("Voter"), \
message=(_("{} voters being added").format(n),MSG_OK))
if v.promoter_user.user_id != current_user.u.user_id:
return render_template('thank_you_template.html', votation_id=votation_id,\
pagetitle=_("Voters"), \
message=(_("Sorry, only the owner of this election can add voters"),MSG_KO))
@login_manager.unauthorized_handler
def unauthorized():
return redirect(url_for('login'))
# @app.route("/version")
# def print_version():
# return render_template('version_template.html', pagetitle="Frontend Version", version=os.environ['voting_version'])
@app.route("/vote/<int:votation_id>", methods=['GET', 'POST'])
@login_required
def vote_(votation_id):
v = votation_dao.load_votation_by_id(votation_id)
if votation_dao.votation_timing(v) != 0:
return redirect('/votation_detail/'+str(votation_id))
if voter_dao.is_voter(votation_id, current_user.u.user_id) == False:
return redirect('/votation_detail/'+str(votation_id))
if v.votation_type == votation_dao.TYPE_MAJORITY_JUDGMENT:
return votemajjud(v)
if v.votation_type == votation_dao.TYPE_SIMPLE_MAJORITY:
return votesimplemaj(v)
if v.votation_type == votation_dao.TYPE_LIST_RAND:
return votelistrand(v)
def votemajjud(v):
options_array = option_dao.load_options_by_votation(v.votation_id)
if request.method == 'GET':
Judgement_array = judgement_dao.load_judgement_by_votation(v.votation_id)
Judgement_array.reverse()
return render_template('majority_jud/vote_template.html',
pagetitle=_("Vote"),
v=v,
options_array=options_array,
juds_array=Judgement_array)
if request.method == 'POST':
vote_key = request.form["vote_key"]
vote_array = []
for c in options_array:
param = "v_" + str(c.option_id)
vote_array.append(int(request.form[param]))
result = vote_maj_jud.save_votes(current_user.u.user_id, vote_key, v.votation_id, vote_array )
if result:
message = (_("Your vote has been registered"), MSG_OK)
else:
message = (_("Error. Vote NOT registered. Wrong key?"),MSG_KO)
return render_template('thank_you_template.html', votation_id=v.votation_id, pagetitle=_("Vote registering"), message=message)
def votesimplemaj(v):
options_array = option_dao.load_options_by_votation(v.votation_id)
if request.method == 'GET':
return render_template('simple_majority/vote_template.html', pagetitle=_("Vote"), \
v=v, options_array=options_array)
if request.method == 'POST':
vote_key = request.form["vote_key"]
my_vote = request.form["my_vote"]
result = vote_simple.save_vote(current_user.u.user_id, vote_key, v.votation_id,int(my_vote))
if result:
message = (_("Your vote has been registered"), MSG_OK)
else:
message = (_("Error. Vote NOT registered. Wrong Password?"),MSG_KO)
return render_template('thank_you_template.html', votation_id=v.votation_id, pagetitle=_("Vote registering"), message=message)
def votelistrand(v):
options_array = option_dao.load_options_by_votation(v.votation_id)
if request.method == 'GET':
return render_template('list_rand/vote_template.html', pagetitle=_("Vote"), \
v=v, options_array=options_array,words_array=judgement_dao.load_judgement_by_votation(v.votation_id))
if request.method == 'POST':
vote_key = request.form["vote_key"]
vote_array = []
for c in options_array:
param = "v_" + str(c.option_id)
vote_array.append(int(request.form[param]))
result = vote_maj_jud.save_votes(current_user.u.user_id, vote_key, v.votation_id, vote_array )
if result:
message = (_("Your vote has been registered"), MSG_OK)
else:
message = (_("Error. Vote NOT registered. Wrong key?"),MSG_KO)
return render_template('thank_you_template.html', votation_id=v.votation_id, pagetitle=_("Vote registering"), message=message)
@app.route("/update_end_date/<int:votation_id>", methods=['GET',])
@login_required
def update_end_date(votation_id):
v = votation_dao.load_votation_by_id(votation_id)
if current_user.u.user_id == v.promoter_user.user_id:
end_date = request.args.get('end_date')
end_time = request.args.get('end_time')
if end_date and end_time:
votation_bo.update_end_date(votation_id, end_date + " " + end_time)
return "OK"
return "KO"
@app.route("/lang/<lang_code>")
@login_required
def lang(lang_code):
global current_language
current_language = lang_code
return render_template('index_template.html', pagetitle=_("Main menu"))
@app.route("/api/report/<int:votation_id>", methods=['GET',])
@login_required
def api_report(votation_id):
csv = vote_dao.get_report_data(votation_id)
response = make_response(csv)
cd = 'attachment; filename=election_{}_result.csv'.format(votation_id)
response.headers['Content-Disposition'] = cd
response.mimetype='text/csv'
return response
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
|
the-stack_0_21825 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
blue = (0, 0, 1.0)
green = (0, 0.8, 0)
red = (1.0, 0, 0)
red_alpha = (1.0, 0, 0, 0.1)
gray = (0.7, 0.7, 0.7)
x = np.arange(0, 1, 0.0001)
p_y = norm.pdf(x, 0.6, 0.1)
plt.plot(x, p_y, color=red)
plt.plot([0.5,0.5], [0.0, np.max(p_y)], '-', color=gray)
plt.plot([0.6,0.6], [0.0, np.max(p_y)+0.01], ':', color=gray)
plt.text(0.6, np.max(p_y) + 0.2, r"$\mathbb{E}_{\cal L} \{ p_{\cal L}(Y=\varphi_B(x)) \}$", fontsize=15, horizontalalignment='center')
plt.text(0.6, 1.7, r"$Var_{\cal L}\{ p_{\cal L}(Y=\varphi_B(x)) \}$", fontsize=15, horizontalalignment='left')
plt.annotate(
'', xy=(0.45, 2.0), xycoords = 'data',
xytext = (0.75, 2.0), textcoords = 'data',
arrowprops = {'arrowstyle':'<->'})
plt.annotate(r"$P_{\cal L}(\varphi_{\cal L}(x)\neq \varphi_B(x))$", xy=(0.475, 1.0), xycoords='data', fontsize=15, xytext=(0.2, 1.7), textcoords='data', arrowprops={'arrowstyle':'->'})
plt.fill_between(x, p_y, y2=0, where=x<0.5, color=red_alpha)
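# Worked value for the shaded region (illustrative): with p ~ N(0.6, 0.1) and decision
# threshold 0.5, the shaded misclassification mass is
#   P(phi_L(x) != phi_B(x)) = Phi((0.5 - 0.6) / 0.1) = Phi(-1) ≈ 0.159
# i.e. norm.cdf(0.5, 0.6, 0.1) ≈ 0.1587.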
plt.ylabel("$P$")
plt.ylim((0., 4.5))
plt.xlim((0., 1.0))
plt.xticks([0.0, 0.5, 1.0])
plt.yticks([])
plt.show()
|
the-stack_0_21826 | """
Retrain the YOLO model for your own dataset.
"""
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def _main():
annotation_path = 'train.txt'
log_dir = 'logs/000/'
classes_path = 'model_data/voc_classes.txt'
anchors_path = 'model_data/tiny_yolo_anchors.txt'
class_names = ['Basket Ball']
num_classes = 1
anchors = get_anchors(anchors_path)
input_shape = (608,608) # multiple of 32, hw
is_tiny_version = len(anchors)==6 # default setting
if is_tiny_version:
model = create_tiny_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
else:
model = create_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze
logging = TensorBoard(log_dir=log_dir)
checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
val_split = 0.1
with open(annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines)*val_split)
num_train = len(lines) - num_val
# Train with frozen layers first, to get a stable loss.
# Adjust num epochs to your dataset. This step is enough to obtain a reasonably good model.
if True:
model.compile(optimizer=Adam(lr=1e-4), loss={
# use custom yolo_loss Lambda layer.
'yolo_loss': lambda y_true, y_pred: y_pred})
batch_size = 32
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=50,
initial_epoch=0,
callbacks=[logging, checkpoint])
model.save_weights(log_dir + 'trained_weights_stage_1.h5')
# Unfreeze and continue training, to fine-tune.
# Train longer if the result is not good.
# Further training if needed.
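# A second, fine-tuning stage is typically appended here (illustrative sketch, following the
# common keras-yolo3 recipe rather than code present in this file):
#
#   if True:
#       for layer in model.layers:
#           layer.trainable = True
#       model.compile(optimizer=Adam(lr=1e-5),
#                     loss={'yolo_loss': lambda y_true, y_pred: y_pred})
#       print('Unfreeze all of the layers.')
#       model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
#                           steps_per_epoch=max(1, num_train//batch_size),
#                           validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
#                           validation_steps=max(1, num_val//batch_size),
#                           epochs=100, initial_epoch=50,
#                           callbacks=[logging, checkpoint, reduce_lr, early_stopping])
#       model.save_weights(log_dir + 'trained_weights_final.h5')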
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
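# The anchors file is expected to hold a single comma-separated line of width,height pairs;
# for the tiny-YOLO defaults this is typically (illustrative):
#   10,14,  23,27,  37,58,  81,82,  135,169,  344,319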
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/yolo_weights.h5'):
'''create the training model'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
num_anchors//3, num_classes+5)) for l in range(3)]
model_body = yolo_body(image_input, num_anchors//3, num_classes)
print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze darknet53 body or freeze all but 3 output layers.
num = (185, len(model_body.layers)-3)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/tiny_yolo_weights.h5'):
'''create the training model, for Tiny YOLOv3'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
num_anchors//2, num_classes+5)) for l in range(2)]
model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze the darknet body or freeze all but 2 output layers.
num = (20, len(model_body.layers)-2)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
'''data generator for fit_generator'''
n = len(annotation_lines)
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
if i==0:
np.random.shuffle(annotation_lines)
image, box = get_random_data(annotation_lines[i], input_shape, random=True)
image_data.append(image)
box_data.append(box)
i = (i+1) % n
image_data = np.array(image_data)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
n = len(annotation_lines)
if n==0 or batch_size<=0: return None
return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
if __name__ == '__main__':
_main()
|
the-stack_0_21828 | # Copyright 2020 Nokia.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import base
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard.dashboards.project.networks import \
workflows as original
from nuage_horizon.api import neutron
LOG = logging.getLogger(__name__)
class UnsafeChoiceField(forms.ChoiceField):
"""
This is an extension of the default choicefield with the exception that it
will not validate that the value in the POST request matches the value
during rendering of the Choicefield (In case Javascript alters the values
client-side)
"""
def validate(self, value):
pass
class CreateSubnetTypeAction(workflows.Action):
with_subnet = forms.BooleanField(label=_("Create Subnet"),
widget=forms.CheckboxInput(attrs={
'class': 'switchable',
'data-slug': 'with_subnet',
'data-hide-tabs': 'create_network__'
'createsubnetdetail'
'action '
'create_network__'
'createsubnetinfo'
'action',
'data-hide-on-checked': 'false'
}),
initial=True,
required=False)
subnet_type = forms.ChoiceField(label=_("Subnet type choice"),
widget=forms.Select(attrs={
'class': 'switched',
'data-slug': 'nuage_id',
'data-switch-on': 'with_subnet',
}),
help_text=_(
"Optional Subnet ID from Nuage. "
"This links the subnet to an "
"existing Nuage one, making it "
"VSD managed"),
required=False)
org_id = UnsafeChoiceField(label=_("Organisation choice"),
required=False)
dom_id = UnsafeChoiceField(label=_("Domain choice"),
required=False)
zone_id = UnsafeChoiceField(label=_("Zone choice"),
required=False)
sub_id = UnsafeChoiceField(label=_("Subnet choice"),
required=False)
ip_version_ = UnsafeChoiceField(label=_("Cidr choice"),
required=False)
hidden_org = forms.CharField(widget=forms.HiddenInput,
required=False)
hidden_dom = forms.CharField(widget=forms.HiddenInput,
required=False)
hidden_zone = forms.CharField(widget=forms.HiddenInput,
required=False)
hidden_sub = forms.CharField(widget=forms.HiddenInput,
required=False)
hidden_ip_version_ = forms.CharField(widget=forms.HiddenInput,
required=False)
hidden_gateway_ = forms.CharField(widget=forms.HiddenInput,
required=False)
class Meta:
name = _("Subnet Type")
help_text = _('Choose the type of subnet you are about to create.')
def __init__(self, request, context, *args, **kwargs):
super(CreateSubnetTypeAction, self).__init__(request, context, *args,
**kwargs)
if request.user.is_superuser:
self.fields['org_id'].choices = [('', _("Choose an Organization"))]
self.fields['dom_id'].choices = [('', _("Choose a Domain"))]
self.fields['zone_id'].choices = [('', _("Choose a Zone"))]
self.fields['sub_id'].choices = [('', _("Choose a Subnet"))]
self.fields['ip_version_'].choices = [('', _("Choose a cidr"))]
type_choices = [('os', _("OpenStack Managed Subnet")),
('vsd_manual', _("VSD Managed Subnet (Manual)")),
('vsd_auto', _("VSD Managed Subnet (Auto)"))]
self.fields['subnet_type'].choices = type_choices
def _org_to_choices(self, organisations):
choices = []
for org in organisations:
display_name = '(' + org['id'][:6] + ') ' + org['name']
choices.append((org['id'], display_name))
return choices
def is_valid(self):
valid = super(CreateSubnetTypeAction, self).is_valid()
if not self.request.user.is_superuser:
return valid
if self.data['subnet_type'] == 'vsd_auto':
if not self.data['hidden_sub']:
self._errors['__all__'] = self.error_class(
['A subnet must be selected below.'])
valid = False
if ((self.data.get('with_subnet') or self.initial.get('network_id'))
and not self.data['subnet_type']):
self._errors['subnet_type'] = self.error_class(
['This is a required field.'])
valid = False
return valid
class CreateSubnetType(workflows.Step):
action_class = CreateSubnetTypeAction
contributes = ("with_subnet", "subnet_type", "org_id", "zone_id", "sub_id",
"hidden_org", "hidden_dom", "hidden_zone", "hidden_sub",
"hidden_ip_version_", "hidden_gateway_")
class CreateSubnetInfoAction(original.CreateSubnetInfoAction):
nuage_id = forms.CharField(max_length=255,
label=_("Nuage UUID"),
required=True,
initial='.')
net_partition = forms.CharField(max_length=255,
label=_("Nuage Net Partition"),
required=True,
initial='.')
def __init__(self, request, context, *args, **kwargs):
super(CreateSubnetInfoAction, self).__init__(request, context, *args,
**kwargs)
if 'with_subnet' in self.fields:
del self.fields['with_subnet']
def clean(self):
cleaned_data = super(workflows.Action, self) \
.clean()
if 'cidr' in cleaned_data.keys() \
and cleaned_data['cidr']:
self._check_subnet_data(cleaned_data)
return cleaned_data
def get_hidden_fields(self, context):
hidden = True
shown = False
if context['subnet_type'] == 'os':
return {'id_nuage_id': hidden,
'id_net_partition': hidden,
'subnet_name': shown,
'id_cidr': shown,
'id_ip_version': shown,
'id_gateway_ip': shown,
'id_no_gateway': shown}
elif context['subnet_type'] == 'vsd_manual':
return {'id_nuage_id': shown,
'id_net_partition': shown,
'subnet_name': shown,
'id_cidr': shown,
'id_ip_version': shown,
'id_gateway_ip': shown,
'id_no_gateway': shown}
else:
return {'id_nuage_id': shown,
'id_net_partition': shown,
'subnet_name': shown,
'id_cidr': shown,
'id_ip_version': shown,
'id_gateway_ip': shown if
context['hidden_gateway_'] else hidden,
'id_no_gateway': shown,
'address_source': context['enable_dhcp']} # managed
def get_locked_fields(self, context, form_data):
if context['subnet_type'] != 'vsd_manual' \
and context['subnet_type'] != 'os':
return self._get_locked_fields(True, form_data)
else:
return self._get_locked_fields(False, form_data)
def _get_locked_fields(self, locked, form_data):
locked_fields = {'id_gateway_ip': locked,
'id_nuage_id': locked,
'id_net_partition': locked}
if 'id_cidr' in form_data:
locked_fields['id_cidr'] = locked and form_data['id_cidr']
locked_fields['id_ip_version'] = locked and form_data['id_cidr']
locked_fields['id_gateway_ip'] = locked and form_data['id_cidr']
return locked_fields
def get_form_data(self, context, request):
if context['subnet_type'] == 'vsd_manual':
return {'id_cidr': '',
'id_gateway_ip': '',
'id_subnet_name': '',
'id_nuage_id': '',
'id_net_partition': ''}
elif context['subnet_type'] == 'os':
return {'id_cidr': '',
'id_gateway_ip': '',
'id_subnet_name': '',
'id_nuage_id': '.',
'id_net_partition': '.'}
else:
if not context['sub_id']:
return {}
vsd_subnet = neutron.vsd_subnet_get(request, context['sub_id'])
vsd_organisation = neutron.vsd_organisation_list(
request, id=context['org_id'])[0]
request.session['vsd_subnet'] = vsd_subnet
request.session['vsd_organisation'] = vsd_organisation
if not self.data['hidden_ip_version_']:
ip_version = '4'
else:
ip_version = self.data['hidden_ip_version_']
if str(ip_version) == '4':
cidr = vsd_subnet['cidr']
else:
cidr = vsd_subnet['ipv6_cidr']
return {'id_nuage_id': vsd_subnet['id'],
'id_net_partition': vsd_organisation['name'],
'id_cidr': cidr,
'id_gateway_ip': context['hidden_gateway_'],
'id_ip_version': ip_version,
'id_subnet_name': vsd_subnet['name']}
class Meta:
name = _("Subnet")
help_text = _('Create a subnet associated with the new network, '
'in which case "Network Address" must be specified. '
'If you wish to create a network without a subnet, '
'uncheck the "Create Subnet" checkbox.')
class CreateSubnetInfo(workflows.Step):
action_class = CreateSubnetInfoAction
contributes = ("subnet_name", "cidr", "ip_version", "gateway_ip",
"no_gateway", "nuage_id", "net_partition")
class CreateSubnetDetailAction(original.CreateSubnetDetailAction):
underlay = forms.ChoiceField(label=_("Underlay"),
choices=[('default', _('Default')),
('true', _('True')),
('false', _('False'))])
def __init__(self, request, context, *args, **kwargs):
super(CreateSubnetDetailAction, self).__init__(request, context, *args,
**kwargs)
if context.get('nuage_id') and context['nuage_id'] != ".":
try:
vsd_subnet = neutron.vsd_subnet_get(request,
context['nuage_id'])
except Exception:
msg = "Failed to find Nuage UUID {} on VSD"\
.format(context['nuage_id'])
exceptions.handle(request, msg, redirect=False)
else:
request.session['vsd_subnet'] = vsd_subnet
if not request.user.is_superuser or not context.get('network_id'):
del self.fields['underlay']
else:
network = neutron.network_get(request, context['network_id'])
if not network or not network.get('router:external', False):
del self.fields['underlay']
def get_hidden_fields(self, context):
hidden = {'id_enable_dhcp': False,
'id_ipv6_modes': True}
return hidden
class Meta:
name = _("Subnet Details")
help_text = _('Specify additional attributes for the subnet.')
class CreateSubnetDetail(original.CreateSubnetDetail):
action_class = CreateSubnetDetailAction
contributes = ("enable_dhcp", "ipv6_modes", "allocation_pools",
"dns_nameservers", "host_routes", "underlay")
class CreateNetworkInfoAction(original.CreateNetworkInfoAction):
with_subnet = forms.BooleanField(label=_("Create Subnet"),
widget=forms.CheckboxInput(attrs={
'class': 'switchable',
'data-slug': 'with_subnet',
'data-hide-tab': 'create_network__'
'createsubnetinfo'
'action,'
'create_network__'
'createsubnetdetail'
'action,'
'create_network__'
'createsubnettype'
'action',
'data-hide-on-checked': 'false'
}),
initial=True,
required=False)
class Meta(object):
name = _("Network")
help_text = _('Create a new network. '
'In addition, a subnet associated with the network '
'can be created in the following steps of this wizard.')
class CreateNetworkInfo(original.CreateNetworkInfo):
action_class = CreateNetworkInfoAction
class CreateNetwork(original.CreateNetwork):
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
if not request.user.is_superuser:
try:
CreateNetwork.unregister(CreateNetworkInfo)
CreateNetwork.unregister(CreateSubnetType)
CreateNetwork.unregister(CreateSubnetInfo)
CreateNetwork.unregister(CreateSubnetDetail)
except base.NotRegistered:
pass
self.default_steps = (original.CreateNetworkInfo,
original.CreateSubnetInfo,
original.CreateSubnetDetail)
else:
try:
CreateNetwork.unregister(original.CreateNetworkInfo)
CreateNetwork.unregister(original.CreateSubnetInfo)
CreateNetwork.unregister(original.CreateSubnetDetail)
except base.NotRegistered:
pass
self.default_steps = (CreateNetworkInfo,
CreateSubnetType,
CreateSubnetInfo,
CreateSubnetDetail)
super(CreateNetwork, self).__init__(request, context_seed, entry_point,
*args, **kwargs)
def handle(self, request, data):
network = self._create_network(request, data)
if not network:
return False
if not data['with_subnet']:
return True
subnet = self._create_subnet(request, data, network, no_redirect=True)
if subnet:
return True
else:
self._delete_network(request, network)
return False
def _create_subnet(self, request, data, network=None,
no_redirect=False):
if network:
network_id = network.id
network_name = network.name
else:
network_id = self.context.get('network_id')
network_name = self.context.get('network_name')
try:
# TODO(team) refactoring This code is duplicated from
# the subnet workflow
params = {'network_id': network_id,
'name': data['subnet_name'],
'cidr': data['cidr'],
'ip_version': int(data['ip_version']),
'enable_dhcp': data['enable_dhcp']}
if request.user.is_superuser and data.get('subnet_type') != 'os':
params['nuagenet'] = data['nuage_id']
params['net_partition'] = data['net_partition']
if (request.user.is_superuser and data.get('underlay')
and data.get('underlay') != 'default'):
params['underlay'] = data['underlay']
params['gateway_ip'] = (
None if data['no_gateway'] else data['gateway_ip'])
self._setup_subnet_parameters(params, data)
subnet = neutron.subnet_create(request, **params)
self.context['subnet_id'] = subnet.id
if 'vsd_subnet' in request.session.keys():
del request.session['vsd_subnet']
msg = _('Subnet "%s" was successfully created.') % data['cidr']
LOG.debug(msg)
return subnet
except Exception as e:
msg = _('Failed to create subnet "%(sub)s" for network "%(net)s": '
' %(reason)s')
if no_redirect:
redirect = None
else:
redirect = self.get_failure_url()
exceptions.handle(request,
msg % {"sub": data['cidr'], "net": network_name,
"reason": e},
redirect=redirect)
return False
@classmethod
def _unregister(cls, step_class):
pass
|
the-stack_0_21829 | """
This script runs the python_webapp_flask application using a development server.
"""
from os import environ
from webapp import app
if __name__ == '__main__':
    HOST = environ.get('SERVER_HOST', 'localhost')
    try:
        PORT = int(environ.get('SERVER_PORT', '5000'))
    except ValueError:
        PORT = 5000
    app.run(HOST, PORT)
# How does run.bat work?
# Why Docker on Windows?
|
the-stack_0_21830 | class Gaitpy():
'''
Gait feature extraction and bout classification from single accelerometer in the lumbar location. This class includes functions for:
- Continuous wavelet based method of gait kinematic feature extraction.
- Machine learning based method of bout classification.
- Visualizing results.
Parameters:
data: str or pandas.core.frame.DataFrame
- Option 1: Pandas dataframe containing unix timestamp column and vertical acceleration data during gait, both of type float
- Option 2: File path of .csv file containing timestamp column and vertical acceleration data during gait. One column should contain unix timestamps of type float (by default gaitpy will assume the column title is 'timestamps' with units in milliseconds). A second column should be vertical acceleration of type float (by default gaitpy will assume the column title is 'y' with units in m/s^2).
sample_rate: int or float
Sampling rate of accelerometer data in Hertz.
v_acc_col_name: str
Column name of the vertical acceleration data ('y' by default)
ts_col_name: str
Column name of the timestamps ('timestamps' by default)
v_acc_units: str
Units of vertical acceleration data ('m/s^2' by default). Options:
- 'm/s^2' = meters per second squared
- 'g' = standard gravity
ts_units: str
Units of timestamps ('ms' by default). Options:
- 's' = seconds
- 'ms' = milli-seconds
- 'us' = microseconds
flip: bool
Boolean specifying whether to flip vertical acceleration data before analysis (False by default). Algorithm
assumes that baseline vertical acceleration data is at -9.8 m/s^2 or -1g. (ie. If baseline data in vertical
direction is 1g, set 'flip' argument to True)
'''
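# Minimal usage sketch (illustrative; the file name, sample rate, and column layout are
# assumptions, the method names come from this class):
#   gait = Gaitpy('lumbar_acc.csv', sample_rate=128, v_acc_col_name='y', ts_col_name='timestamps')
#   bouts = gait.classify_bouts()
#   features = gait.extract_features(subject_height=170, classified_gait=bouts)
#   gait.plot_contacts(features)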
def __init__(self, data, sample_rate, v_acc_col_name='y', ts_col_name='timestamps', v_acc_units='m/s^2', ts_units='ms', flip=False):
self.data = data
self.sample_rate = sample_rate
self.v_acc_col_name = v_acc_col_name
self.ts_col_name = ts_col_name
self.v_acc_units = v_acc_units
self.ts_units = ts_units
self.flip = flip
self.down_sample = 50
def extract_features(self, subject_height, subject_height_units='centimeters', sensor_height_ratio=0.53, result_file=None, classified_gait=None, ic_prom=5, fc_prom=10):
''' Inverted pendulum and continuous wavelet based method of gait feature detection
Parameters:
subject_height: int or float
Height of the subject. Accepts centimeters by default.
subject_height_units: str
Units of provided subject height. Centimeters by default.
- options: 'centimeters', 'inches', 'meters'
sensor_height_ratio: float
Height of the sensor relative to subject height. Calculated: sensor height / subject height
result_file: str
Optional argument that accepts .csv filepath string to save resulting gait feature dataframe to.
None by default. (ie. myfolder/myfile.csv)
classified_gait: str or pandas.core.frame.DataFrame
Pandas dataframe containing results of gait bout classification procedure (classify_bouts)
OR
File path of .h5 file containing results of gait bout classification procedure (classify_bouts)
ic_prom: int
Prominence of initial contact peak detection
fc_prom: int
Prominence of final contact peak detection
Returns:
pandas.core.frame.DataFrame
Pandas dataframe containing results of feature extraction procedure (extract_features)
'''
import pandas as pd
import gaitpy.util as util
import warnings
import numpy as np
print('\tExtracting features...')
# Load data
y_accel, timestamps = util._load_data(self, self.down_sample)
# Calculate sensor height
sensor_height = util._calculate_sensor_height(subject_height, subject_height_units, sensor_height_ratio)
# If classified gait is provided, load pandas dataframe or h5 file
if classified_gait is not None:
if type(classified_gait) is str:
gait_predictions = pd.read_hdf(classified_gait)
elif type(classified_gait) is pd.core.frame.DataFrame:
gait_predictions = classified_gait
else:
print('Unable to load classified gait: Please make sure the data is in the correct format, aborting...')
return
# Isolate gait bouts
gait_windows = gait_predictions[gait_predictions['prediction'] == 1]
if gait_windows.empty:
print('The classified_gait data indicates no bouts of gait were detected, aborting...')
return
# Concatenate concurrent bouts
gait_bouts = util._concatenate_windows(gait_windows, window_length=3)
else:
# if classified_gait is not provided, assume entire timeseries is 1 bout of gait
start_time = timestamps[0].astype('datetime64[ms]')
end_time = timestamps.iloc[-1].astype('datetime64[ms]')
gait_bouts = pd.DataFrame(data={'start_time': [start_time],
'end_time': [end_time],
'bout_length': [(end_time - start_time).item().total_seconds()]})
all_bout_gait_features = pd.DataFrame()
bout_n = 1
# Loop through gait bouts
for row_n, bout in gait_bouts.iterrows():
bout_indices = (timestamps.astype('datetime64[ms]') >= bout.start_time) & (timestamps.astype('datetime64[ms]') <= bout.end_time)
bout_data = pd.DataFrame([])
bout_data['y'] = pd.DataFrame(y_accel.loc[bout_indices].reset_index(drop=True))
bout_data['ts'] = timestamps.loc[bout_indices].reset_index(drop=True)
if len(bout_data.y) <= 15:
warnings.warn('There are too few data points between '+str(bout.start_time)+' and '+str(bout.end_time)+', skipping bout...')
continue
# Check the orientation of vertical axis
window_mu = np.mean(bout_data.y)
if window_mu < 0:
pass
else:
warnings.warn('Data appears to be flipped between '+str(bout.start_time)+' and '+str(bout.end_time)+', flipping axis...')
bout_data['y'] = bout_data['y'] * (-1)
# Run CWT Gait Model IC and FC detection
ic_peaks, fc_peaks = util._cwt(bout_data.y, self.down_sample, ic_prom, fc_prom)
# Run gait cycle optimization procedure
pd.options.mode.chained_assignment = None
optimized_gait = util._optimization(bout_data['ts'], ic_peaks, fc_peaks)
if optimized_gait.empty or 1 not in list(optimized_gait.Gait_Cycle):
continue
# Calculate changes in height of the center of mass
optimized_gait = util._height_change_com(optimized_gait, bout_data['ts'], bout_data['y'], self.down_sample)
# Calculate gait features
gait_features = util._cwt_feature_extraction(optimized_gait, sensor_height)
# remove center of mass height and gait cycle boolean columns, remove rows with NAs
gait_features.dropna(inplace=True)
gait_features.drop(['CoM_height','Gait_Cycle', 'FC_opp_foot'], axis=1, inplace=True)
if gait_features.empty:
continue
gait_features.insert(0, 'bout_number', bout_n)
gait_features.insert(1, 'bout_length_sec', bout.bout_length)
gait_features.insert(2, 'bout_start_time', bout.start_time)
gait_features.insert(5, 'gait_cycles', len(gait_features))
all_bout_gait_features = all_bout_gait_features.append(gait_features)
bout_n += 1
all_bout_gait_features.reset_index(drop=True, inplace=True)
all_bout_gait_features.iloc[:,7:] = all_bout_gait_features.iloc[:,7:].round(2)
# Save results
if result_file:
try:
if not result_file.endswith('.csv'):
result_file += '.csv'
all_bout_gait_features.to_csv(result_file, index=False, float_format='%.3f')
except:
print('Unable to save data: Please make sure your results directory exists, aborting...')
return
if all_bout_gait_features.empty:
print('\tFeature extraction complete. No gait cycles detected...\n')
else:
print('\tFeature extraction complete!\n')
return all_bout_gait_features
def plot_contacts(self, gait_features, result_file=None, show_plot=True):
""" Visualization of bouts, initial contacts, and final contacts of lumbar based gait feature extraction
Parameters:
gait_features: pandas.DataFrame or str
Pandas dataframe containing results of extract_features function
OR
File path of .csv file containing results of extract_features function
result_file: str
Optional argument that accepts .html filepath string to save resulting gait event plot to.
None by default. (ie. myfolder/myfile.html)
show_plot: bool
Optional boolean argument that specifies whether your plot is displayed. True by default.
"""
from bokeh.plotting import figure, output_file, save, show
from bokeh.models import Legend, Span
import pandas as pd
import gaitpy.util as util
import numpy as np
print('\tPlotting contacts...')
# Load data
y_accel, timestamps = util._load_data(self, self.down_sample)
ts = pd.to_datetime(timestamps, unit='ms')
# Load gait_features
try:
if type(gait_features) is str:
icfc = pd.read_csv(gait_features)
elif type(gait_features) is pd.core.frame.DataFrame:
icfc = gait_features
else:
print('Unable to load gait features: Please make sure the gait_features is in the correct format, aborting...')
return
except:
print('Unable to load gait features: Please make sure you have provided the correct filepath or dataframe, aborting...')
return
if icfc.empty:
print('\tGait feature dataframe is empty, aborting...')
return
p = figure(plot_width=1200, plot_height=600, x_axis_label='Time', y_axis_label='m/s^2', toolbar_location='above', x_axis_type='datetime')
# Plot vertical axis
p1 = p.line(ts, y_accel, line_width=2, line_color='blue')
# isolate ICs, FCs, and bout start/end times
minima_time = []
minima_signal = []
maxima_time = []
maxima_signal = []
bout_starts = []
bout_ends = []
ics = pd.to_datetime(icfc.IC, unit='ms')
fcs = pd.to_datetime(icfc.FC, unit='ms')
icfc.bout_start_time = icfc.bout_start_time.astype(np.int64).values // 10 ** 6
bouts = icfc[['bout_number', 'bout_length_sec', 'bout_start_time']].drop_duplicates()
for ic in ics:
minima_time.append(ic)
minima_signal.append(float(y_accel[ts.index[ts == ic]]))
for fc in fcs:
maxima_time.append(fc)
maxima_signal.append(float(y_accel[ts.index[ts == fc]]))
for row, bout in bouts.iterrows():
bout_starts.append(bout.bout_start_time)
bout_ends.append(bout.bout_start_time + (bout.bout_length_sec*1000))
# add IC and FCs to plot
p2 = p.circle(minima_time, minima_signal, size=15, color="green", alpha=0.5)
p3 = p.circle(maxima_time, maxima_signal, size=15, color="darkorange", alpha=0.5)
# add bout start and end times to plot
for bout_start in bout_starts:
start_bout_line = Span(location=bout_start,
dimension='height', line_color='green',
line_dash='solid', line_width=1.5)
p.add_layout(start_bout_line)
for bout_end in bout_ends:
end_bout_line = Span(location=bout_end,
dimension='height', line_color='red',
line_dash='solid', line_width=1.5)
p.add_layout(end_bout_line)
# add legend
legend = Legend(items=[
("Acceleration", [p1]),
("Initial contact", [p2]),
("Final contact", [p3])
], location=(10, 300))
# format plot
p.add_layout(legend, 'right')
p.xaxis.axis_label_text_font_size = "16pt"
p.yaxis.axis_label_text_font_size = "16pt"
p.axis.major_label_text_font_size = '16pt'
p.title.align = 'center'
p.title.text_font_size = '16pt'
p.xaxis.axis_label_text_font_style = 'normal'
p.yaxis.axis_label_text_font_style = 'normal'
p.xaxis.axis_label_standoff = 5
p.yaxis.axis_label_standoff = 20
p.legend.label_text_font = "arial"
p.legend.label_text_font_size = '16pt'
p.legend.glyph_height = 30
if show_plot:
show(p)
# save plot
if result_file:
try:
if not result_file.endswith('.html'):
result_file += '.html'
output_file(result_file)
save(p)
except:
print('Unable to save data: Please make sure your results directory exists, aborting...')
return
print('\tPlot complete!\n')
def classify_bouts(self, result_file=None):
""" Gait bout classification using acceleration data in the vertical direction from the lumbar location.
Parameters:
result_file: str
Optional argument that accepts .h5 filepath string to save resulting predictions to.
None by default. (ie. myfolder/myfile.h5)
Returns:
pandas.core.frame.DataFrame
Pandas dataframe containing results of bout classification procedure (classify_bouts)
"""
import pickle
import pandas as pd
import os
import deepdish as dd
import gaitpy.util as util
print('\tClassifying bouts of gait...')
# Load model and feature order
model_filename = os.path.dirname(os.path.realpath(__file__)) + '/model/model.pkl'
features_filename = os.path.dirname(os.path.realpath(__file__)) + '/model/feature_order.txt'
model = pickle.load(open(model_filename, 'rb'))
feature_order = open(features_filename, 'r').read().splitlines()
# Load data and convert to g
y_accel, ts = util._load_data(self, self.down_sample)
y_accel_g = y_accel / 9.80665
data = pd.DataFrame({'y': y_accel_g})
timestamps = pd.DatetimeIndex(ts.astype('datetime64[ms]'))
# Extract signal features from vertical acceleration data
feature_set, start_times_list, end_times_list = util._extract_signal_features(data, timestamps, self.down_sample)
feature_set = feature_set[feature_order]
# Predict
try:
pred = model.predict(feature_set)
predictions_df = pd.DataFrame(
{'prediction': pred, 'window_start_time': start_times_list, 'window_end_time': end_times_list})
except:
print('Unable to make predictions from signal features, aborting...')
return
# Save predictions to hdf file
if result_file:
try:
if not result_file.endswith('.h5'):
result_file += '.h5'
predictions_dict = {}
predictions_dict['predictions'] = predictions_df
dd.io.save(result_file, predictions_dict)
except:
print('Unable to save data: Please make sure your results directory exists, aborting...')
return
print('\tBout classification complete!\n')
return predictions_df
|
the-stack_0_21832 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 28 11:53:53 2021
@author: woojae-macbook13
"""
from gurobipy import*
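# Minimum-cost network flow model solved below (summary of the code, for reference):
#   minimize    sum_{i,j} COST[i][j] * X[i,j]
#   subject to  sum_j X[i,j] - sum_k X[k,i] = B[i]   for every node i
#               X[i,j] <= CAPA[i][j] wherever a capacity is given,  X[i,j] >= 0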
try :
    m = Model('ex9_3_1')
    Z = LinExpr()
    NODE = 5
    X = m.addVars(NODE, NODE, vtype = GRB.INTEGER, name = 'X')
    COST = [[0, 2, 4, 9, 0],
            [0, 0, 3, 0, 0],
            [0, 0, 0, 0, 1],
            [0, 0, 0, 0, 3],
            [0, 0, 0, 2, 0]]
    CAPA = [[0, 10, 0, 0, 0],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 80],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0]]
    B = [50, 40, 0, -30, -60]
    # Objective function
    Z = 0
    for i in range(NODE) :
        for j in range(NODE) :
            Z += X[i,j]*COST[i][j]
    # Constraints
    for i in range(NODE) :
        tempC = 0
        for j in range(NODE) :
            if COST[i][j] != 0 :
                tempC += X[i,j]
        for k in range(NODE) :
            if COST[k][i] != 0 :
                tempC -= X[k,i]
        c0 = tempC == B[i]
        m.addConstr(c0, 'c0-'+str(i))
    for i in range(NODE) :
        for j in range(NODE) :
            if CAPA[i][j] != 0 :
                c1 = X[i,j] <= CAPA[i][j]
                m.addConstr(c1, 'c1-'+str(i)+'-'+str(j))
    for i in range(NODE) :
        for j in range(NODE) :
            c2 = X[i,j] >= 0
            m.addConstr(c2, 'c2-'+str(i)+'-'+str(j))
    m.setObjective(Z, GRB.MINIMIZE)
    m.optimize()
    for v in m.getVars() :
        if v.x != 0 :
            print(v.varName, ':', v.x)
    print('Z : ', m.objVal)
except GurobiError :
    print('Error reported')
|
the-stack_0_21833 | import getpass
import json
import os
import platform
import re
import shutil
import string
import sys
import tempfile
from contextlib import contextmanager, suppress
from datetime import datetime, timezone
from functools import wraps
from io import BytesIO
from pathlib import Path
from typing import Any, Callable, IO, Iterator, List, Optional, Set, Union, cast
from urllib.parse import urlparse
import requests
import urllib3 # type: ignore
from .exceptions import *
from .instaloadercontext import InstaloaderContext, RateController
from .lateststamps import LatestStamps
from .nodeiterator import NodeIterator, resumable_iteration
from .structures import (Hashtag, Highlight, JsonExportable, Post, PostLocation, Profile, Story, StoryItem,
load_structure_from_file, save_structure_to_file, PostSidecarNode, TitlePic)
def _get_config_dir() -> str:
if platform.system() == "Windows":
# on Windows, use %LOCALAPPDATA%\Instaloader
localappdata = os.getenv("LOCALAPPDATA")
if localappdata is not None:
return os.path.join(localappdata, "Instaloader")
# legacy fallback - store in temp dir if %LOCALAPPDATA% is not set
return os.path.join(tempfile.gettempdir(), ".instaloader-" + getpass.getuser())
# on Unix, use ~/.config/instaloader
return os.path.join(os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config")), "instaloader")
def get_default_session_filename(username: str) -> str:
"""Returns default session filename for given username."""
configdir = _get_config_dir()
sessionfilename = "session-{}".format(username)
return os.path.join(configdir, sessionfilename)
def get_legacy_session_filename(username: str) -> str:
"""Returns legacy (until v4.4.3) default session filename for given username."""
dirname = tempfile.gettempdir() + "/" + ".instaloader-" + getpass.getuser()
filename = dirname + "/" + "session-" + username
return filename.lower()
def get_default_stamps_filename() -> str:
"""
Returns default filename for latest stamps database.
.. versionadded:: 4.8
"""
configdir = _get_config_dir()
return os.path.join(configdir, "latest-stamps.ini")
def format_string_contains_key(format_string: str, key: str) -> bool:
# pylint:disable=unused-variable
for literal_text, field_name, format_spec, conversion in string.Formatter().parse(format_string):
if field_name and (field_name == key or field_name.startswith(key + '.')):
return True
return False
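# Usage sketch (illustrative only): the helper above is used to check whether a
# user-supplied pattern references a given key, e.g. assuming patterns like the
# defaults used elsewhere in this module:
#
#   format_string_contains_key("{target}/{date_utc}_UTC", "target")   # True
#   format_string_contains_key("{date_utc}_UTC", "profile")           # False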
def _requires_login(func: Callable) -> Callable:
"""Decorator to raise an exception if herewith-decorated function is called without being logged in"""
@wraps(func)
def call(instaloader, *args, **kwargs):
if not instaloader.context.is_logged_in:
raise LoginRequiredException("--login=USERNAME required.")
return func(instaloader, *args, **kwargs)
return call
def _retry_on_connection_error(func: Callable) -> Callable:
"""Decorator to retry the function max_connection_attemps number of times.
Herewith-decorated functions need an ``_attempt`` keyword argument.
This is to decorate functions that do network requests that may fail. Note that
:meth:`.get_json`, :meth:`.get_iphone_json`, :meth:`.graphql_query` and :meth:`.graphql_node_list` already have
their own logic for retrying, hence functions that only use these for network access must not be decorated with this
decorator."""
@wraps(func)
def call(instaloader, *args, **kwargs):
try:
return func(instaloader, *args, **kwargs)
except (urllib3.exceptions.HTTPError, requests.exceptions.RequestException, ConnectionException) as err:
error_string = "{}({}): {}".format(func.__name__, ', '.join([repr(arg) for arg in args]), err)
if (kwargs.get('_attempt') or 1) == instaloader.context.max_connection_attempts:
raise ConnectionException(error_string) from None
instaloader.context.error(error_string + " [retrying; skip with ^C]", repeat_at_end=False)
try:
if kwargs.get('_attempt'):
kwargs['_attempt'] += 1
else:
kwargs['_attempt'] = 2
instaloader.context.do_sleep()
return call(instaloader, *args, **kwargs)
except KeyboardInterrupt:
instaloader.context.error("[skipped by user]", repeat_at_end=False)
raise ConnectionException(error_string) from None
return call
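# Illustrative sketch of how the decorator above is applied (hypothetical
# method, not part of instaloader): the decorated function must accept an
# ``_attempt`` keyword argument, which the decorator increments on each retry.
#
# @_retry_on_connection_error
# def fetch_raw(self, url: str, _attempt: int = 1) -> bytes:
#     return self.context.get_raw(url).content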
class _ArbitraryItemFormatter(string.Formatter):
def __init__(self, item: Any):
self._item = item
def get_value(self, key, args, kwargs):
"""Override to substitute {ATTRIBUTE} by attributes of our _item."""
if key == 'filename' and isinstance(self._item, (Post, StoryItem, PostSidecarNode, TitlePic)):
return "{filename}"
if hasattr(self._item, key):
return getattr(self._item, key)
return super().get_value(key, args, kwargs)
def format_field(self, value, format_spec):
"""Override :meth:`string.Formatter.format_field` to have our
default format_spec for :class:`datetime.Datetime` objects, and to
let None yield an empty string rather than ``None``."""
if isinstance(value, datetime) and not format_spec:
return super().format_field(value, '%Y-%m-%d_%H-%M-%S')
if value is None:
return ''
return super().format_field(value, format_spec)
class _PostPathFormatter(_ArbitraryItemFormatter):
def get_value(self, key, args, kwargs):
ret = super().get_value(key, args, kwargs)
if not isinstance(ret, str):
return ret
return self.sanitize_path(ret)
@staticmethod
def sanitize_path(ret: str) -> str:
"""Replaces '/' with similar looking Division Slash and some other illegal filename characters on Windows."""
ret = ret.replace('/', '\u2215')
if platform.system() == 'Windows':
ret = ret.replace(':', '\uff1a').replace('<', '\ufe64').replace('>', '\ufe65').replace('\"', '\uff02')
ret = ret.replace('\\', '\ufe68').replace('|', '\uff5c').replace('?', '\ufe16').replace('*', '\uff0a')
ret = ret.replace('\n', ' ').replace('\r', ' ')
return ret
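# Example of the sanitization above (illustrative): sanitize_path("a/b") yields
# "a\u2215b" on every platform (the slash becomes a Division Slash), and on
# Windows characters such as ':' or '?' are additionally mapped to full-width
# look-alikes, so the result can always be used as a single path component.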
class Instaloader:
"""Instaloader Class.
:param quiet: :option:`--quiet`
:param user_agent: :option:`--user-agent`
:param dirname_pattern: :option:`--dirname-pattern`, default is ``{target}``
:param filename_pattern: :option:`--filename-pattern`, default is ``{date_utc}_UTC``
:param title_pattern:
:option:`--title-pattern`, default is ``{date_utc}_UTC_{typename}`` if ``dirname_pattern`` contains
``{target}`` or ``{profile}``, ``{target}_{date_utc}_UTC_{typename}`` otherwise.
:param download_pictures: not :option:`--no-pictures`
:param download_videos: not :option:`--no-videos`
:param download_video_thumbnails: not :option:`--no-video-thumbnails`
:param download_geotags: :option:`--geotags`
:param download_comments: :option:`--comments`
:param save_metadata: not :option:`--no-metadata-json`
:param compress_json: not :option:`--no-compress-json`
:param post_metadata_txt_pattern:
:option:`--post-metadata-txt`, default is ``{caption}``. Set to empty string to avoid creation of post metadata
txt file.
:param storyitem_metadata_txt_pattern: :option:`--storyitem-metadata-txt`, default is empty (=none)
:param max_connection_attempts: :option:`--max-connection-attempts`
:param request_timeout: :option:`--request-timeout`, set per-request timeout (seconds)
:param rate_controller: Generator for a :class:`RateController` to override rate controlling behavior
:param resume_prefix: :option:`--resume-prefix`, or None for :option:`--no-resume`.
:param check_resume_bbd: Whether to check the date of expiry of resume files and reject them if expired.
:param slide: :option:`--slide`
:param fatal_status_codes: :option:`--abort-on`
:param iphone_support: not :option:`--no-iphone`
.. attribute:: context
The associated :class:`InstaloaderContext` with low-level communication functions and logging.
"""
def __init__(self,
sleep: bool = True,
quiet: bool = False,
user_agent: Optional[str] = None,
dirname_pattern: Optional[str] = None,
filename_pattern: Optional[str] = None,
download_pictures=True,
download_videos: bool = True,
download_video_thumbnails: bool = True,
download_geotags: bool = False,
download_comments: bool = False,
save_metadata: bool = True,
compress_json: bool = True,
                 post_metadata_txt_pattern: Optional[str] = None,
                 storyitem_metadata_txt_pattern: Optional[str] = None,
max_connection_attempts: int = 3,
request_timeout: float = 300.0,
rate_controller: Optional[Callable[[InstaloaderContext], RateController]] = None,
resume_prefix: Optional[str] = "iterator",
check_resume_bbd: bool = True,
slide: Optional[str] = None,
fatal_status_codes: Optional[List[int]] = None,
iphone_support: bool = True,
title_pattern: Optional[str] = None,
proxy: Optional[str] = None):
self.context = InstaloaderContext(sleep, quiet, user_agent, max_connection_attempts,
request_timeout, rate_controller, fatal_status_codes,
iphone_support, proxy)
# configuration parameters
self.dirname_pattern = dirname_pattern or "{target}"
self.filename_pattern = filename_pattern or "{date_utc}_UTC"
if title_pattern is not None:
self.title_pattern = title_pattern
else:
if (format_string_contains_key(self.dirname_pattern, 'profile') or
format_string_contains_key(self.dirname_pattern, 'target')):
self.title_pattern = '{date_utc}_UTC_{typename}'
else:
self.title_pattern = '{target}_{date_utc}_UTC_{typename}'
self.download_pictures = download_pictures
self.download_videos = download_videos
self.download_video_thumbnails = download_video_thumbnails
self.download_geotags = download_geotags
self.download_comments = download_comments
self.save_metadata = save_metadata
self.compress_json = compress_json
self.post_metadata_txt_pattern = '{caption}' if post_metadata_txt_pattern is None \
else post_metadata_txt_pattern
self.storyitem_metadata_txt_pattern = '' if storyitem_metadata_txt_pattern is None \
else storyitem_metadata_txt_pattern
self.resume_prefix = resume_prefix
self.check_resume_bbd = check_resume_bbd
self.slide = slide or ""
self.slide_start = 0
self.slide_end = -1
if self.slide != "":
splitted = self.slide.split('-')
if len(splitted) == 1:
if splitted[0] == 'last':
# download only last image of a sidecar
self.slide_start = -1
else:
if int(splitted[0]) > 0:
self.slide_start = self.slide_end = int(splitted[0])-1
else:
raise InvalidArgumentException("--slide parameter must be greater than 0.")
elif len(splitted) == 2:
if splitted[1] == 'last':
self.slide_start = int(splitted[0])-1
elif 0 < int(splitted[0]) < int(splitted[1]):
self.slide_start = int(splitted[0])-1
self.slide_end = int(splitted[1])-1
else:
raise InvalidArgumentException("Invalid data for --slide parameter.")
else:
raise InvalidArgumentException("Invalid data for --slide parameter.")
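    # Examples of the --slide parsing above (illustrative): "3" selects only the
    # third sidecar node (slide_start == slide_end == 2), "2-4" selects nodes two
    # to four (slide_start == 1, slide_end == 3), and "last" keeps only the final
    # node (slide_start == -1). Indices are converted to 0-based here and are
    # re-interpreted modulo the post's mediacount in download_post().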
@contextmanager
def anonymous_copy(self):
"""Yield an anonymous, otherwise equally-configured copy of an Instaloader instance; Then copy its error log."""
new_loader = Instaloader(
sleep=self.context.sleep,
quiet=self.context.quiet,
user_agent=self.context.user_agent,
dirname_pattern=self.dirname_pattern,
filename_pattern=self.filename_pattern,
download_pictures=self.download_pictures,
download_videos=self.download_videos,
download_video_thumbnails=self.download_video_thumbnails,
download_geotags=self.download_geotags,
download_comments=self.download_comments,
save_metadata=self.save_metadata,
compress_json=self.compress_json,
post_metadata_txt_pattern=self.post_metadata_txt_pattern,
storyitem_metadata_txt_pattern=self.storyitem_metadata_txt_pattern,
max_connection_attempts=self.context.max_connection_attempts,
request_timeout=self.context.request_timeout,
resume_prefix=self.resume_prefix,
check_resume_bbd=self.check_resume_bbd,
slide=self.slide,
fatal_status_codes=self.context.fatal_status_codes,
iphone_support=self.context.iphone_support)
yield new_loader
self.context.error_log.extend(new_loader.context.error_log)
new_loader.context.error_log = [] # avoid double-printing of errors
new_loader.close()
def close(self):
"""Close associated session objects and repeat error log."""
self.context.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
@_retry_on_connection_error
def download_pic(self, filename: str, url: str, mtime: datetime,
filename_suffix: Optional[str] = None, _attempt: int = 1) -> bool:
"""Downloads and saves picture with given url under given directory with given timestamp.
Returns true, if file was actually downloaded, i.e. updated."""
urlmatch = re.search('\\.[a-z0-9]*\\?', url)
file_extension = url[-3:] if urlmatch is None else urlmatch.group(0)[1:-1]
if filename_suffix is not None:
filename += '_' + filename_suffix
filename += '.' + file_extension
if os.path.isfile(filename):
self.context.log(filename + ' exists', end=' ', flush=True)
return False
self.context.get_and_write_raw(url, filename)
os.utime(filename, (datetime.now().timestamp(), mtime.timestamp()))
return True
def save_metadata_json(self, filename: str, structure: JsonExportable) -> None:
"""Saves metadata JSON file of a structure."""
if self.compress_json:
filename += '.json.xz'
else:
filename += '.json'
os.makedirs(os.path.dirname(filename), exist_ok=True)
save_structure_to_file(structure, filename)
if isinstance(structure, (Post, StoryItem)):
# log 'json ' message when saving Post or StoryItem
self.context.log('json', end=' ', flush=True)
def update_comments(self, filename: str, post: Post) -> None:
def _postcommentanswer_asdict(comment):
return {'id': comment.id,
'created_at': int(comment.created_at_utc.replace(tzinfo=timezone.utc).timestamp()),
'text': comment.text,
'owner': comment.owner._asdict(),
'likes_count': comment.likes_count}
def _postcomment_asdict(comment):
return {**_postcommentanswer_asdict(comment),
'answers': sorted([_postcommentanswer_asdict(answer) for answer in comment.answers],
key=lambda t: int(t['id']),
reverse=True)}
def get_unique_comments(comments, combine_answers=False):
if not comments:
return list()
comments_list = sorted(sorted(list(comments), key=lambda t: int(t['id'])),
key=lambda t: int(t['created_at']), reverse=True)
unique_comments_list = [comments_list[0]]
for x, y in zip(comments_list[:-1], comments_list[1:]):
if x['id'] != y['id']:
unique_comments_list.append(y)
else:
unique_comments_list[-1]['likes_count'] = y.get('likes_count')
if combine_answers:
combined_answers = unique_comments_list[-1].get('answers') or list()
if 'answers' in y:
combined_answers.extend(y['answers'])
unique_comments_list[-1]['answers'] = get_unique_comments(combined_answers)
return unique_comments_list
def get_new_comments(new_comments, start):
for idx, comment in enumerate(new_comments, start=start+1):
if idx % 250 == 0:
self.context.log('{}'.format(idx), end='…', flush=True)
yield comment
def save_comments(extended_comments):
unique_comments = get_unique_comments(extended_comments, combine_answers=True)
answer_ids = set(int(answer['id']) for comment in unique_comments for answer in comment.get('answers', []))
with open(filename, 'w') as file:
file.write(json.dumps(list(filter(lambda t: int(t['id']) not in answer_ids, unique_comments)),
indent=4))
base_filename = filename
filename += '_comments.json'
try:
with open(filename) as fp:
comments = json.load(fp)
except (FileNotFoundError, json.decoder.JSONDecodeError):
comments = list()
comments_iterator = post.get_comments()
try:
with resumable_iteration(
context=self.context,
iterator=comments_iterator,
load=load_structure_from_file,
save=save_structure_to_file,
format_path=lambda magic: "{}_{}_{}.json.xz".format(base_filename, self.resume_prefix, magic),
check_bbd=self.check_resume_bbd,
enabled=self.resume_prefix is not None
) as (_is_resuming, start_index):
comments.extend(_postcomment_asdict(comment)
for comment in get_new_comments(comments_iterator, start_index))
except (KeyboardInterrupt, AbortDownloadException):
if comments:
save_comments(comments)
raise
if comments:
save_comments(comments)
self.context.log('comments', end=' ', flush=True)
def save_caption(self, filename: str, mtime: datetime, caption: str) -> None:
"""Updates picture caption / Post metadata info"""
def _elliptify(caption):
pcaption = caption.replace('\n', ' ').strip()
return '[' + ((pcaption[:29] + u"\u2026") if len(pcaption) > 31 else pcaption) + ']'
filename += '.txt'
caption += '\n'
pcaption = _elliptify(caption)
bcaption = caption.encode("UTF-8")
with suppress(FileNotFoundError):
with open(filename, 'rb') as file:
file_caption = file.read()
if file_caption.replace(b'\r\n', b'\n') == bcaption.replace(b'\r\n', b'\n'):
try:
self.context.log(pcaption + ' unchanged', end=' ', flush=True)
except UnicodeEncodeError:
self.context.log('txt unchanged', end=' ', flush=True)
return None
else:
def get_filename(index):
return filename if index == 0 else '{0}_old_{2:02}{1}'.format(*os.path.splitext(filename), index)
i = 0
while os.path.isfile(get_filename(i)):
i = i + 1
for index in range(i, 0, -1):
os.rename(get_filename(index - 1), get_filename(index))
try:
self.context.log(_elliptify(file_caption.decode("UTF-8")) + ' updated', end=' ', flush=True)
except UnicodeEncodeError:
self.context.log('txt updated', end=' ', flush=True)
try:
self.context.log(pcaption, end=' ', flush=True)
except UnicodeEncodeError:
self.context.log('txt', end=' ', flush=True)
with open(filename, 'wb') as text_file:
with BytesIO(bcaption) as bio:
shutil.copyfileobj(cast(IO, bio), text_file)
os.utime(filename, (datetime.now().timestamp(), mtime.timestamp()))
def save_location(self, filename: str, location: PostLocation, mtime: datetime) -> None:
"""Save post location name and Google Maps link."""
filename += '_location.txt'
location_string = (location.name + "\n" +
"https://maps.google.com/maps?q={0},{1}&ll={0},{1}\n".format(location.lat,
location.lng))
with open(filename, 'wb') as text_file:
with BytesIO(location_string.encode()) as bio:
shutil.copyfileobj(cast(IO, bio), text_file)
os.utime(filename, (datetime.now().timestamp(), mtime.timestamp()))
self.context.log('geo', end=' ', flush=True)
def format_filename_within_target_path(self,
target: Union[str, Path],
owner_profile: Optional[Profile],
identifier: str,
name_suffix: str,
extension: str):
"""Returns a filename within the target path.
.. versionadded:: 4.5"""
if ((format_string_contains_key(self.dirname_pattern, 'profile') or
format_string_contains_key(self.dirname_pattern, 'target'))):
profile_str = owner_profile.username.lower() if owner_profile is not None else target
return os.path.join(self.dirname_pattern.format(profile=profile_str, target=target),
'{0}_{1}.{2}'.format(identifier, name_suffix, extension))
else:
return os.path.join(self.dirname_pattern.format(),
'{0}_{1}_{2}.{3}'.format(target, identifier, name_suffix, extension))
@_retry_on_connection_error
def download_title_pic(self, url: str, target: Union[str, Path], name_suffix: str, owner_profile: Optional[Profile],
_attempt: int = 1) -> None:
"""Downloads and saves a picture that does not have an association with a Post or StoryItem, such as a
Profile picture or a Highlight cover picture. Modification time is taken from the HTTP response headers.
.. versionadded:: 4.3"""
http_response = self.context.get_raw(url)
date_object = None # type: Optional[datetime]
if 'Last-Modified' in http_response.headers:
date_object = datetime.strptime(http_response.headers["Last-Modified"], '%a, %d %b %Y %H:%M:%S GMT')
date_object = date_object.replace(tzinfo=timezone.utc)
pic_bytes = None
else:
pic_bytes = http_response.content
ig_filename = url.split('/')[-1].split('?')[0]
pic_data = TitlePic(owner_profile, target, name_suffix, ig_filename, date_object)
dirname = _PostPathFormatter(pic_data).format(self.dirname_pattern, target=target)
filename_template = os.path.join(dirname,
_PostPathFormatter(pic_data).format(self.title_pattern, target=target))
filename = self.__prepare_filename(filename_template, lambda: url) + ".jpg"
content_length = http_response.headers.get('Content-Length', None)
if os.path.isfile(filename) and (not self.context.is_logged_in or
(content_length is not None and
os.path.getsize(filename) >= int(content_length))):
self.context.log(filename + ' already exists')
return
os.makedirs(os.path.dirname(filename), exist_ok=True)
self.context.write_raw(pic_bytes if pic_bytes else http_response, filename)
if date_object:
os.utime(filename, (datetime.now().timestamp(), date_object.timestamp()))
self.context.log('') # log output of _get_and_write_raw() does not produce \n
def download_profilepic_if_new(self, profile: Profile, latest_stamps: Optional[LatestStamps]) -> None:
"""
Downloads and saves profile pic if it has not been downloaded before.
:param latest_stamps: Database with the last downloaded data. If not present,
the profile pic is downloaded unless it already exists
.. versionadded:: 4.8
"""
if latest_stamps is None:
self.download_profilepic(profile)
return
profile_pic_basename = profile.profile_pic_url.split('/')[-1].split('?')[0]
saved_basename = latest_stamps.get_profile_pic(profile.username)
if saved_basename == profile_pic_basename:
return
self.download_profilepic(profile)
latest_stamps.set_profile_pic(profile.username, profile_pic_basename)
def download_profilepic(self, profile: Profile) -> None:
"""Downloads and saves profile pic."""
self.download_title_pic(profile.profile_pic_url, profile.username.lower(), 'profile_pic', profile)
def download_highlight_cover(self, highlight: Highlight, target: Union[str, Path]) -> None:
"""Downloads and saves Highlight cover picture.
.. versionadded:: 4.3"""
self.download_title_pic(highlight.cover_url, target, 'cover', highlight.owner_profile)
def download_hashtag_profilepic(self, hashtag: Hashtag) -> None:
"""Downloads and saves the profile picture of a Hashtag.
.. versionadded:: 4.4"""
self.download_title_pic(hashtag.profile_pic_url, '#' + hashtag.name, 'profile_pic', None)
@_requires_login
def save_session_to_file(self, filename: Optional[str] = None) -> None:
"""Saves internally stored :class:`requests.Session` object.
:param filename: Filename, or None to use default filename.
:raises LoginRequiredException: If called without being logged in.
"""
if filename is None:
assert self.context.username is not None
filename = get_default_session_filename(self.context.username)
dirname = os.path.dirname(filename)
if dirname != '' and not os.path.exists(dirname):
os.makedirs(dirname)
os.chmod(dirname, 0o700)
with open(filename, 'wb') as sessionfile:
os.chmod(filename, 0o600)
self.context.save_session_to_file(sessionfile)
self.context.log("Saved session to %s." % filename)
def load_session_from_file(self, username: str, filename: Optional[str] = None) -> None:
"""Internally stores :class:`requests.Session` object loaded from file.
If filename is None, the file with the default session path is loaded.
:raises FileNotFoundError: If the file does not exist.
"""
if filename is None:
filename = get_default_session_filename(username)
if not os.path.exists(filename):
filename = get_legacy_session_filename(username)
with open(filename, 'rb') as sessionfile:
self.context.load_session_from_file(username, sessionfile)
self.context.log("Loaded session from %s." % filename)
def test_login(self) -> Optional[str]:
"""Returns the Instagram username to which given :class:`requests.Session` object belongs, or None."""
return self.context.test_login()
def login(self, user: str, passwd: str) -> None:
"""Log in to instagram with given username and password and internally store session object.
:raises InvalidArgumentException: If the provided username does not exist.
:raises BadCredentialsException: If the provided password is wrong.
:raises ConnectionException: If connection to Instagram failed.
:raises TwoFactorAuthRequiredException: First step of 2FA login done, now call
:meth:`Instaloader.two_factor_login`."""
self.context.login(user, passwd)
def two_factor_login(self, two_factor_code) -> None:
"""Second step of login if 2FA is enabled.
        Use this method after :meth:`Instaloader.login` raised :exc:`TwoFactorAuthRequiredException`.
:raises InvalidArgumentException: No two-factor authentication pending.
:raises BadCredentialsException: 2FA verification code invalid.
.. versionadded:: 4.2"""
self.context.two_factor_login(two_factor_code)
@staticmethod
def __prepare_filename(filename_template: str, url: Callable[[], str]) -> str:
"""Replace filename token inside filename_template with url's filename and assure the directories exist.
.. versionadded:: 4.6"""
if "{filename}" in filename_template:
filename = filename_template.replace("{filename}",
os.path.splitext(os.path.basename(urlparse(url()).path))[0])
else:
filename = filename_template
os.makedirs(os.path.dirname(filename), exist_ok=True)
return filename
def format_filename(self, item: Union[Post, StoryItem, PostSidecarNode, TitlePic],
target: Optional[Union[str, Path]] = None):
"""Format filename of a :class:`Post` or :class:`StoryItem` according to ``filename-pattern`` parameter.
.. versionadded:: 4.1"""
return _PostPathFormatter(item).format(self.filename_pattern, target=target)
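    # Illustrative example (values assumed): with the default filename_pattern
    # "{date_utc}_UTC", a Post taken on 2021-04-28 11:53:53 UTC is formatted as
    # "2021-04-28_11-53-53_UTC"; a pattern such as "{owner_username}_{shortcode}"
    # would instead pull those attributes from the item via _ArbitraryItemFormatter.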
def download_post(self, post: Post, target: Union[str, Path]) -> bool:
"""
Download everything associated with one instagram post node, i.e. picture, caption and video.
:param post: Post to download.
:param target: Target name, i.e. profile name, #hashtag, :feed; for filename.
:return: True if something was downloaded, False otherwise, i.e. file was already there
"""
def _already_downloaded(path: str) -> bool:
if not os.path.isfile(path):
return False
else:
self.context.log(path + ' exists', end=' ', flush=True)
return True
def _all_already_downloaded(path_base, is_videos_enumerated) -> bool:
if '{filename}' in self.filename_pattern:
# full URL needed to evaluate actual filename, cannot determine at
# this point if all sidecar nodes were already downloaded.
return False
for idx, is_video in is_videos_enumerated:
if self.download_pictures and (not is_video or self.download_video_thumbnails):
if not _already_downloaded("{0}_{1}.jpg".format(path_base, idx)):
return False
if is_video and self.download_videos:
if not _already_downloaded("{0}_{1}.mp4".format(path_base, idx)):
return False
return True
dirname = _PostPathFormatter(post).format(self.dirname_pattern, target=target)
filename_template = os.path.join(dirname, self.format_filename(post, target=target))
filename = self.__prepare_filename(filename_template, lambda: post.url)
# Download the image(s) / video thumbnail and videos within sidecars if desired
downloaded = True
if post.typename == 'GraphSidecar':
if self.download_pictures or self.download_videos:
if not _all_already_downloaded(
filename_template, enumerate(
(post.get_is_videos()[i]
for i in range(self.slide_start % post.mediacount, self.slide_end % post.mediacount + 1)),
start=self.slide_start % post.mediacount + 1
)
):
for edge_number, sidecar_node in enumerate(
post.get_sidecar_nodes(self.slide_start, self.slide_end),
start=self.slide_start % post.mediacount + 1
):
suffix = str(edge_number) # type: Optional[str]
if '{filename}' in self.filename_pattern:
suffix = None
if self.download_pictures and (not sidecar_node.is_video or self.download_video_thumbnails):
# pylint:disable=cell-var-from-loop
sidecar_filename = self.__prepare_filename(filename_template,
lambda: sidecar_node.display_url)
# Download sidecar picture or video thumbnail (--no-pictures implies --no-video-thumbnails)
downloaded &= self.download_pic(filename=sidecar_filename, url=sidecar_node.display_url,
mtime=post.date_local, filename_suffix=suffix)
if sidecar_node.is_video and self.download_videos:
# pylint:disable=cell-var-from-loop
sidecar_filename = self.__prepare_filename(filename_template,
lambda: sidecar_node.video_url)
# Download sidecar video if desired
downloaded &= self.download_pic(filename=sidecar_filename, url=sidecar_node.video_url,
mtime=post.date_local, filename_suffix=suffix)
else:
downloaded = False
elif post.typename == 'GraphImage':
# Download picture
if self.download_pictures:
downloaded = (not _already_downloaded(filename + ".jpg") and
self.download_pic(filename=filename, url=post.url, mtime=post.date_local))
elif post.typename == 'GraphVideo':
# Download video thumbnail (--no-pictures implies --no-video-thumbnails)
if self.download_pictures and self.download_video_thumbnails:
with self.context.error_catcher("Video thumbnail of {}".format(post)):
downloaded = (not _already_downloaded(filename + ".jpg") and
self.download_pic(filename=filename, url=post.url, mtime=post.date_local))
else:
self.context.error("Warning: {0} has unknown typename: {1}".format(post, post.typename))
# Save caption if desired
metadata_string = _ArbitraryItemFormatter(post).format(self.post_metadata_txt_pattern).strip()
if metadata_string:
self.save_caption(filename=filename, mtime=post.date_local, caption=metadata_string)
# Download video if desired
if post.is_video and self.download_videos:
downloaded &= (not _already_downloaded(filename + ".mp4") and
self.download_pic(filename=filename, url=post.video_url, mtime=post.date_local))
# Download geotags if desired
if self.download_geotags and post.location:
self.save_location(filename, post.location, post.date_local)
# Update comments if desired
if self.download_comments:
self.update_comments(filename=filename, post=post)
# Save metadata as JSON if desired.
if self.save_metadata:
self.save_metadata_json(filename, post)
self.context.log()
return downloaded
@_requires_login
def get_stories(self, userids: Optional[List[int]] = None) -> Iterator[Story]:
"""Get available stories from followees or all stories of users whose ID are given.
Does not mark stories as seen.
To use this, one needs to be logged in
:param userids: List of user IDs to be processed in terms of downloading their stories, or None.
:raises LoginRequiredException: If called without being logged in.
"""
if not userids:
data = self.context.graphql_query("d15efd8c0c5b23f0ef71f18bf363c704",
{"only_stories": True})["data"]["user"]
if data is None:
raise BadResponseException('Bad stories reel JSON.')
userids = list(edge["node"]["id"] for edge in data["feed_reels_tray"]["edge_reels_tray_to_reel"]["edges"])
def _userid_chunks():
assert userids is not None
userids_per_query = 50
for i in range(0, len(userids), userids_per_query):
yield userids[i:i + userids_per_query]
for userid_chunk in _userid_chunks():
stories = self.context.graphql_query("303a4ae99711322310f25250d988f3b7",
{"reel_ids": userid_chunk, "precomposed_overlay": False})["data"]
yield from (Story(self.context, media) for media in stories['reels_media'])
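    # Usage sketch (illustrative only, requires a logged-in instance named
    # ``loader``): iterate over the reels returned above and print who currently
    # has visible stories.
    #
    # for story in loader.get_stories():
    #     print(story.owner_username, story.itemcount)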
@_requires_login
def download_stories(self,
userids: Optional[List[Union[int, Profile]]] = None,
fast_update: bool = False,
filename_target: Optional[str] = ':stories',
storyitem_filter: Optional[Callable[[StoryItem], bool]] = None,
latest_stamps: Optional[LatestStamps] = None) -> None:
"""
Download available stories from user followees or all stories of users whose ID are given.
Does not mark stories as seen.
To use this, one needs to be logged in
:param userids: List of user IDs or Profiles to be processed in terms of downloading their stories
:param fast_update: If true, abort when first already-downloaded picture is encountered
:param filename_target: Replacement for {target} in dirname_pattern and filename_pattern
or None if profile name should be used instead
:param storyitem_filter: function(storyitem), which returns True if given StoryItem should be downloaded
:param latest_stamps: Database with the last times each user was scraped
:raises LoginRequiredException: If called without being logged in.
.. versionchanged:: 4.8
Add `latest_stamps` parameter.
"""
if not userids:
self.context.log("Retrieving all visible stories...")
profile_count = None
else:
userids = [p if isinstance(p, int) else p.userid for p in userids]
profile_count = len(userids)
for i, user_story in enumerate(self.get_stories(userids), start=1):
name = user_story.owner_username
if profile_count is not None:
msg = "[{0:{w}d}/{1:{w}d}] Retrieving stories from profile {2}.".format(i, profile_count, name,
w=len(str(profile_count)))
else:
msg = "[{:3d}] Retrieving stories from profile {}.".format(i, name)
self.context.log(msg)
totalcount = user_story.itemcount
count = 1
if latest_stamps is not None:
# pylint:disable=cell-var-from-loop
last_scraped = latest_stamps.get_last_story_timestamp(name)
scraped_timestamp = datetime.now().astimezone()
for item in user_story.get_items():
if latest_stamps is not None and item.date_utc.replace(tzinfo=timezone.utc) <= last_scraped:
break
if storyitem_filter is not None and not storyitem_filter(item):
self.context.log("<{} skipped>".format(item), flush=True)
continue
self.context.log("[%3i/%3i] " % (count, totalcount), end="", flush=True)
count += 1
with self.context.error_catcher('Download story from user {}'.format(name)):
downloaded = self.download_storyitem(item, filename_target if filename_target else name)
if fast_update and not downloaded:
break
if latest_stamps is not None:
latest_stamps.set_last_story_timestamp(name, scraped_timestamp)
def download_storyitem(self, item: StoryItem, target: Union[str, Path]) -> bool:
"""Download one user story.
:param item: Story item, as in story['items'] for story in :meth:`get_stories`
:param target: Replacement for {target} in dirname_pattern and filename_pattern
:return: True if something was downloaded, False otherwise, i.e. file was already there
"""
def _already_downloaded(path: str) -> bool:
if not os.path.isfile(path):
return False
else:
self.context.log(path + ' exists', end=' ', flush=True)
return True
date_local = item.date_local
dirname = _PostPathFormatter(item).format(self.dirname_pattern, target=target)
filename_template = os.path.join(dirname, self.format_filename(item, target=target))
filename = self.__prepare_filename(filename_template, lambda: item.url)
downloaded = False
if not item.is_video or self.download_video_thumbnails is True:
downloaded = (not _already_downloaded(filename + ".jpg") and
self.download_pic(filename=filename, url=item.url, mtime=date_local))
if item.is_video and self.download_videos is True:
filename = self.__prepare_filename(filename_template, lambda: str(item.video_url))
downloaded |= (not _already_downloaded(filename + ".mp4") and
self.download_pic(filename=filename, url=item.video_url, mtime=date_local))
# Save caption if desired
metadata_string = _ArbitraryItemFormatter(item).format(self.storyitem_metadata_txt_pattern).strip()
if metadata_string:
self.save_caption(filename=filename, mtime=item.date_local, caption=metadata_string)
# Save metadata as JSON if desired.
if self.save_metadata is not False:
self.save_metadata_json(filename, item)
self.context.log()
return downloaded
@_requires_login
def get_highlights(self, user: Union[int, Profile]) -> Iterator[Highlight]:
"""Get all highlights from a user.
To use this, one needs to be logged in.
.. versionadded:: 4.1
:param user: ID or Profile of the user whose highlights should get fetched.
:raises LoginRequiredException: If called without being logged in.
"""
userid = user if isinstance(user, int) else user.userid
data = self.context.graphql_query("7c16654f22c819fb63d1183034a5162f",
{"user_id": userid, "include_chaining": False, "include_reel": False,
"include_suggested_users": False, "include_logged_out_extras": False,
"include_highlight_reels": True})["data"]["user"]['edge_highlight_reels']
if data is None:
raise BadResponseException('Bad highlights reel JSON.')
yield from (Highlight(self.context, edge['node'], user if isinstance(user, Profile) else None)
for edge in data['edges'])
@_requires_login
def download_highlights(self,
user: Union[int, Profile],
fast_update: bool = False,
filename_target: Optional[str] = None,
storyitem_filter: Optional[Callable[[StoryItem], bool]] = None) -> None:
"""
Download available highlights from a user whose ID is given.
To use this, one needs to be logged in.
.. versionadded:: 4.1
.. versionchanged:: 4.3
Also downloads and saves the Highlight's cover pictures.
:param user: ID or Profile of the user whose highlights should get downloaded.
:param fast_update: If true, abort when first already-downloaded picture is encountered
:param filename_target: Replacement for {target} in dirname_pattern and filename_pattern
or None if profile name and the highlights' titles should be used instead
:param storyitem_filter: function(storyitem), which returns True if given StoryItem should be downloaded
:raises LoginRequiredException: If called without being logged in.
"""
for user_highlight in self.get_highlights(user):
name = user_highlight.owner_username
highlight_target = (filename_target
if filename_target
else (Path(_PostPathFormatter.sanitize_path(name)) /
_PostPathFormatter.sanitize_path(user_highlight.title))) # type: Union[str, Path]
self.context.log("Retrieving highlights \"{}\" from profile {}".format(user_highlight.title, name))
self.download_highlight_cover(user_highlight, highlight_target)
totalcount = user_highlight.itemcount
count = 1
for item in user_highlight.get_items():
if storyitem_filter is not None and not storyitem_filter(item):
self.context.log("<{} skipped>".format(item), flush=True)
continue
self.context.log("[%3i/%3i] " % (count, totalcount), end="", flush=True)
count += 1
with self.context.error_catcher('Download highlights \"{}\" from user {}'.format(user_highlight.title,
name)):
downloaded = self.download_storyitem(item, highlight_target)
if fast_update and not downloaded:
break
def posts_download_loop(self,
posts: Iterator[Post],
target: Union[str, Path],
fast_update: bool = False,
post_filter: Optional[Callable[[Post], bool]] = None,
max_count: Optional[int] = None,
total_count: Optional[int] = None,
owner_profile: Optional[Profile] = None,
takewhile: Optional[Callable[[Post], bool]] = None) -> None:
"""
Download the Posts returned by given Post Iterator.
.. versionadded:: 4.4
.. versionchanged:: 4.5
Transparently resume an aborted operation if `posts` is a :class:`NodeIterator`.
.. versionchanged:: 4.8
Add `takewhile` parameter.
:param posts: Post Iterator to loop through.
:param target: Target name.
:param fast_update: :option:`--fast-update`.
:param post_filter: :option:`--post-filter`.
:param max_count: Maximum count of Posts to download (:option:`--count`).
:param total_count: Total number of posts returned by given iterator.
:param owner_profile: Associated profile, if any.
:param takewhile: Expression evaluated for each post. Once it returns false, downloading stops.
"""
displayed_count = (max_count if total_count is None or max_count is not None and max_count < total_count
else total_count)
sanitized_target = target
if isinstance(target, str):
sanitized_target = _PostPathFormatter.sanitize_path(target)
if takewhile is None:
takewhile = lambda _: True
with resumable_iteration(
context=self.context,
iterator=posts,
load=load_structure_from_file,
save=save_structure_to_file,
format_path=lambda magic: self.format_filename_within_target_path(
sanitized_target, owner_profile, self.resume_prefix or '', magic, 'json.xz'
),
check_bbd=self.check_resume_bbd,
enabled=self.resume_prefix is not None
) as (is_resuming, start_index):
for number, post in enumerate(posts, start=start_index + 1):
if (max_count is not None and number > max_count) or not takewhile(post):
break
if displayed_count is not None:
self.context.log("[{0:{w}d}/{1:{w}d}] ".format(number, displayed_count,
w=len(str(displayed_count))),
end="", flush=True)
else:
self.context.log("[{:3d}] ".format(number), end="", flush=True)
if post_filter is not None:
try:
if not post_filter(post):
self.context.log("{} skipped".format(post))
continue
except (InstaloaderException, KeyError, TypeError) as err:
self.context.error("{} skipped. Filter evaluation failed: {}".format(post, err))
continue
with self.context.error_catcher("Download {} of {}".format(post, target)):
# The PostChangedException gets raised if the Post's id/shortcode changed while obtaining
# additional metadata. This is most likely the case if a HTTP redirect takes place while
# resolving the shortcode URL.
# The `post_changed` variable keeps the fast-update functionality alive: A Post which is
                    # obtained after a redirect has probably already been downloaded as a previous Post of the
# same Profile.
# Observed in issue #225: https://github.com/instaloader/instaloader/issues/225
post_changed = False
while True:
try:
downloaded = self.download_post(post, target=target)
break
except PostChangedException:
post_changed = True
continue
if fast_update and not downloaded and not post_changed:
# disengage fast_update for first post when resuming
if not is_resuming or number > 0:
break
@_requires_login
def get_feed_posts(self) -> Iterator[Post]:
"""Get Posts of the user's feed.
:return: Iterator over Posts of the user's feed.
:raises LoginRequiredException: If called without being logged in.
"""
data = self.context.graphql_query("d6f4427fbe92d846298cf93df0b937d3", {})["data"]
while True:
feed = data["user"]["edge_web_feed_timeline"]
for edge in feed["edges"]:
node = edge["node"]
if node.get("__typename") in Post.supported_graphql_types() and node.get("shortcode") is not None:
yield Post(self.context, node)
if not feed["page_info"]["has_next_page"]:
break
data = self.context.graphql_query("d6f4427fbe92d846298cf93df0b937d3",
{'fetch_media_item_count': 12,
'fetch_media_item_cursor': feed["page_info"]["end_cursor"],
'fetch_comment_count': 4,
'fetch_like': 10,
'has_stories': False})["data"]
@_requires_login
    def download_feed_posts(self, max_count: Optional[int] = None, fast_update: bool = False,
post_filter: Optional[Callable[[Post], bool]] = None) -> None:
"""
Download pictures from the user's feed.
Example to download up to the 20 pics the user last liked::
loader = Instaloader()
loader.load_session_from_file('USER')
loader.download_feed_posts(max_count=20, fast_update=True,
post_filter=lambda post: post.viewer_has_liked)
:param max_count: Maximum count of pictures to download
:param fast_update: If true, abort when first already-downloaded picture is encountered
:param post_filter: function(post), which returns True if given picture should be downloaded
:raises LoginRequiredException: If called without being logged in.
"""
self.context.log("Retrieving pictures from your feed...")
self.posts_download_loop(self.get_feed_posts(), ":feed", fast_update, post_filter, max_count=max_count)
@_requires_login
    def download_saved_posts(self, max_count: Optional[int] = None, fast_update: bool = False,
post_filter: Optional[Callable[[Post], bool]] = None) -> None:
"""Download user's saved pictures.
:param max_count: Maximum count of pictures to download
:param fast_update: If true, abort when first already-downloaded picture is encountered
:param post_filter: function(post), which returns True if given picture should be downloaded
:raises LoginRequiredException: If called without being logged in.
"""
self.context.log("Retrieving saved posts...")
assert self.context.username is not None # safe due to @_requires_login; required by typechecker
node_iterator = Profile.own_profile(self.context).get_saved_posts()
self.posts_download_loop(node_iterator, ":saved",
fast_update, post_filter,
max_count=max_count, total_count=node_iterator.count)
@_requires_login
def get_location_posts(self, location: str) -> Iterator[Post]:
"""Get Posts which are listed by Instagram for a given Location.
:return: Iterator over Posts of a location's posts
:raises LoginRequiredException: If called without being logged in.
.. versionadded:: 4.2
.. versionchanged:: 4.2.9
Require being logged in (as required by Instagram)
"""
has_next_page = True
end_cursor = None
while has_next_page:
if end_cursor:
params = {'__a': 1, 'max_id': end_cursor}
else:
params = {'__a': 1}
location_data = self.context.get_json('explore/locations/{0}/'.format(location),
params)['graphql']['location']['edge_location_to_media']
yield from (Post(self.context, edge['node']) for edge in location_data['edges'])
has_next_page = location_data['page_info']['has_next_page']
end_cursor = location_data['page_info']['end_cursor']
@_requires_login
def download_location(self, location: str,
max_count: Optional[int] = None,
post_filter: Optional[Callable[[Post], bool]] = None,
fast_update: bool = False) -> None:
"""Download pictures of one location.
To download the last 30 pictures with location 362629379, do::
loader = Instaloader()
loader.download_location(362629379, max_count=30)
:param location: Location to download, as Instagram numerical ID
:param max_count: Maximum count of pictures to download
:param post_filter: function(post), which returns True if given picture should be downloaded
:param fast_update: If true, abort when first already-downloaded picture is encountered
:raises LoginRequiredException: If called without being logged in.
.. versionadded:: 4.2
.. versionchanged:: 4.2.9
Require being logged in (as required by Instagram)
"""
self.context.log("Retrieving pictures for location {}...".format(location))
self.posts_download_loop(self.get_location_posts(location), "%" + location, fast_update, post_filter,
max_count=max_count)
@_requires_login
def get_explore_posts(self) -> NodeIterator[Post]:
"""Get Posts which are worthy of exploring suggested by Instagram.
:return: Iterator over Posts of the user's suggested posts.
:rtype: NodeIterator[Post]
:raises LoginRequiredException: If called without being logged in.
"""
return NodeIterator(
self.context,
'df0dcc250c2b18d9fd27c5581ef33c7c',
lambda d: d['data']['user']['edge_web_discover_media'],
lambda n: Post(self.context, n),
query_referer='https://www.instagram.com/explore/',
)
def get_hashtag_posts(self, hashtag: str) -> Iterator[Post]:
"""Get Posts associated with a #hashtag.
.. deprecated:: 4.4
Use :meth:`Hashtag.get_posts`."""
return Hashtag.from_name(self.context, hashtag).get_posts()
def download_hashtag(self, hashtag: Union[Hashtag, str],
max_count: Optional[int] = None,
post_filter: Optional[Callable[[Post], bool]] = None,
fast_update: bool = False,
profile_pic: bool = True,
posts: bool = True) -> None:
"""Download pictures of one hashtag.
To download the last 30 pictures with hashtag #cat, do::
loader = Instaloader()
loader.download_hashtag('cat', max_count=30)
:param hashtag: Hashtag to download, as instance of :class:`Hashtag`, or string without leading '#'
:param max_count: Maximum count of pictures to download
:param post_filter: function(post), which returns True if given picture should be downloaded
:param fast_update: If true, abort when first already-downloaded picture is encountered
:param profile_pic: not :option:`--no-profile-pic`.
:param posts: not :option:`--no-posts`.
.. versionchanged:: 4.4
Add parameters `profile_pic` and `posts`.
"""
if isinstance(hashtag, str):
with self.context.error_catcher("Get hashtag #{}".format(hashtag)):
hashtag = Hashtag.from_name(self.context, hashtag)
if not isinstance(hashtag, Hashtag):
return
target = "#" + hashtag.name
if profile_pic:
with self.context.error_catcher("Download profile picture of {}".format(target)):
self.download_hashtag_profilepic(hashtag)
if posts:
self.context.log("Retrieving pictures with hashtag #{}...".format(hashtag.name))
self.posts_download_loop(hashtag.get_all_posts(), target, fast_update, post_filter,
max_count=max_count)
if self.save_metadata:
json_filename = '{0}/{1}'.format(self.dirname_pattern.format(profile=target,
target=target),
target)
self.save_metadata_json(json_filename, hashtag)
def download_tagged(self, profile: Profile, fast_update: bool = False,
target: Optional[str] = None,
post_filter: Optional[Callable[[Post], bool]] = None,
latest_stamps: Optional[LatestStamps] = None) -> None:
"""Download all posts where a profile is tagged.
.. versionadded:: 4.1
.. versionchanged:: 4.8
Add `latest_stamps` parameter."""
self.context.log("Retrieving tagged posts for profile {}.".format(profile.username))
posts_takewhile: Optional[Callable[[Post], bool]] = None
if latest_stamps is not None:
last_scraped = latest_stamps.get_last_tagged_timestamp(profile.username)
posts_takewhile = lambda p: p.date_utc.replace(tzinfo=timezone.utc) > last_scraped
tagged_posts = profile.get_tagged_posts()
self.posts_download_loop(tagged_posts,
target if target
else (Path(_PostPathFormatter.sanitize_path(profile.username)) /
_PostPathFormatter.sanitize_path(':tagged')),
fast_update, post_filter, takewhile=posts_takewhile)
if latest_stamps is not None and tagged_posts.first_item is not None:
latest_stamps.set_last_tagged_timestamp(profile.username, tagged_posts.first_item.date_local.astimezone())
def download_igtv(self, profile: Profile, fast_update: bool = False,
post_filter: Optional[Callable[[Post], bool]] = None,
latest_stamps: Optional[LatestStamps] = None) -> None:
"""Download IGTV videos of a profile.
.. versionadded:: 4.3
.. versionchanged:: 4.8
Add `latest_stamps` parameter."""
self.context.log("Retrieving IGTV videos for profile {}.".format(profile.username))
posts_takewhile: Optional[Callable[[Post], bool]] = None
if latest_stamps is not None:
last_scraped = latest_stamps.get_last_igtv_timestamp(profile.username)
posts_takewhile = lambda p: p.date_utc.replace(tzinfo=timezone.utc) > last_scraped
igtv_posts = profile.get_igtv_posts()
self.posts_download_loop(igtv_posts, profile.username, fast_update, post_filter,
total_count=profile.igtvcount, owner_profile=profile, takewhile=posts_takewhile)
if latest_stamps is not None and igtv_posts.first_item is not None:
latest_stamps.set_last_igtv_timestamp(profile.username, igtv_posts.first_item.date_local.astimezone())
def _get_id_filename(self, profile_name: str) -> str:
if ((format_string_contains_key(self.dirname_pattern, 'profile') or
format_string_contains_key(self.dirname_pattern, 'target'))):
return os.path.join(self.dirname_pattern.format(profile=profile_name.lower(),
target=profile_name.lower()),
'id')
else:
return os.path.join(self.dirname_pattern.format(),
'{0}_id'.format(profile_name.lower()))
def load_profile_id(self, profile_name: str) -> Optional[int]:
"""
Load ID of profile from profile directory.
.. versionadded:: 4.8
"""
id_filename = self._get_id_filename(profile_name)
try:
with open(id_filename, 'rb') as id_file:
return int(id_file.read())
except (FileNotFoundError, ValueError):
return None
def save_profile_id(self, profile: Profile):
"""
Store ID of profile on profile directory.
.. versionadded:: 4.0.6
"""
os.makedirs(self.dirname_pattern.format(profile=profile.username,
target=profile.username), exist_ok=True)
with open(self._get_id_filename(profile.username), 'w') as text_file:
text_file.write(str(profile.userid) + "\n")
self.context.log("Stored ID {0} for profile {1}.".format(profile.userid, profile.username))
def check_profile_id(self, profile_name: str, latest_stamps: Optional[LatestStamps] = None) -> Profile:
"""
Consult locally stored ID of profile with given name, check whether ID matches and whether name
has changed and return current name of the profile, and store ID of profile.
:param profile_name: Profile name
:param latest_stamps: Database of downloaded data. If present, IDs are retrieved from it,
otherwise from the target directory
:return: Instance of current profile
.. versionchanged:: 4.8
Add `latest_stamps` parameter.
"""
profile = None
profile_name_not_exists_err = None
try:
profile = Profile.from_username(self.context, profile_name)
except ProfileNotExistsException as err:
profile_name_not_exists_err = err
if latest_stamps is None:
profile_id = self.load_profile_id(profile_name)
else:
profile_id = latest_stamps.get_profile_id(profile_name)
if profile_id is not None:
if (profile is None) or \
(profile_id != profile.userid):
if profile is not None:
self.context.log("Profile {0} does not match the stored unique ID {1}.".format(profile_name,
profile_id))
else:
self.context.log("Trying to find profile {0} using its unique ID {1}.".format(profile_name,
profile_id))
profile_from_id = Profile.from_id(self.context, profile_id)
newname = profile_from_id.username
self.context.error("Profile {0} has changed its name to {1}.".format(profile_name, newname))
if latest_stamps is None:
if ((format_string_contains_key(self.dirname_pattern, 'profile') or
format_string_contains_key(self.dirname_pattern, 'target'))):
os.rename(self.dirname_pattern.format(profile=profile_name.lower(),
target=profile_name.lower()),
self.dirname_pattern.format(profile=newname.lower(),
target=newname.lower()))
else:
os.rename('{0}/{1}_id'.format(self.dirname_pattern.format(), profile_name.lower()),
'{0}/{1}_id'.format(self.dirname_pattern.format(), newname.lower()))
else:
latest_stamps.rename_profile(profile_name, newname)
return profile_from_id
# profile exists and profile id matches saved id
return profile
if profile is not None:
if latest_stamps is None:
self.save_profile_id(profile)
else:
latest_stamps.save_profile_id(profile.username, profile.userid)
return profile
if profile_name_not_exists_err:
raise profile_name_not_exists_err
raise ProfileNotExistsException("Profile {0} does not exist.".format(profile_name))
def download_profiles(self, profiles: Set[Profile],
profile_pic: bool = True, posts: bool = True,
tagged: bool = False,
igtv: bool = False,
highlights: bool = False,
stories: bool = False,
fast_update: bool = False,
post_filter: Optional[Callable[[Post], bool]] = None,
storyitem_filter: Optional[Callable[[Post], bool]] = None,
raise_errors: bool = False,
latest_stamps: Optional[LatestStamps] = None):
"""High-level method to download set of profiles.
:param profiles: Set of profiles to download.
:param profile_pic: not :option:`--no-profile-pic`.
:param posts: not :option:`--no-posts`.
:param tagged: :option:`--tagged`.
:param igtv: :option:`--igtv`.
:param highlights: :option:`--highlights`.
:param stories: :option:`--stories`.
:param fast_update: :option:`--fast-update`.
:param post_filter: :option:`--post-filter`.
:param storyitem_filter: :option:`--post-filter`.
:param raise_errors:
Whether :exc:`LoginRequiredException` and :exc:`PrivateProfileNotFollowedException` should be raised or
            caught and printed with :meth:`InstaloaderContext.error_catcher`.
:param latest_stamps: :option:`--latest-stamps`.
.. versionadded:: 4.1
.. versionchanged:: 4.3
Add `igtv` parameter.
.. versionchanged:: 4.8
Add `latest_stamps` parameter.
"""
@contextmanager
def _error_raiser(_str):
yield
# error_handler type is Callable[[Optional[str]], ContextManager[None]] (not supported with Python 3.5.0..3.5.3)
error_handler = _error_raiser if raise_errors else self.context.error_catcher
for i, profile in enumerate(profiles, start=1):
self.context.log("[{0:{w}d}/{1:{w}d}] Downloading profile {2}".format(i, len(profiles), profile.username,
w=len(str(len(profiles)))))
with error_handler(profile.username): # type: ignore # (ignore type for Python 3.5 support)
profile_name = profile.username
# Download profile picture
if profile_pic:
with self.context.error_catcher('Download profile picture of {}'.format(profile_name)):
self.download_profilepic_if_new(profile, latest_stamps)
# Save metadata as JSON if desired.
if self.save_metadata:
json_filename = os.path.join(self.dirname_pattern.format(profile=profile_name,
target=profile_name),
'{0}_{1}'.format(profile_name, profile.userid))
self.save_metadata_json(json_filename, profile)
# Catch some errors
if tagged or igtv or highlights or posts:
if (not self.context.is_logged_in and
profile.is_private):
raise LoginRequiredException("--login=USERNAME required.")
if (self.context.username != profile.username and
profile.is_private and
not profile.followed_by_viewer):
raise PrivateProfileNotFollowedException("Private but not followed.")
# Download tagged, if requested
if tagged:
with self.context.error_catcher('Download tagged of {}'.format(profile_name)):
self.download_tagged(profile, fast_update=fast_update, post_filter=post_filter,
latest_stamps=latest_stamps)
# Download IGTV, if requested
if igtv:
with self.context.error_catcher('Download IGTV of {}'.format(profile_name)):
self.download_igtv(profile, fast_update=fast_update, post_filter=post_filter,
latest_stamps=latest_stamps)
# Download highlights, if requested
if highlights:
with self.context.error_catcher('Download highlights of {}'.format(profile_name)):
self.download_highlights(profile, fast_update=fast_update, storyitem_filter=storyitem_filter)
# Iterate over pictures and download them
if posts:
self.context.log("Retrieving posts from profile {}.".format(profile_name))
posts_takewhile: Optional[Callable[[Post], bool]] = None
if latest_stamps is not None:
# pylint:disable=cell-var-from-loop
last_scraped = latest_stamps.get_last_post_timestamp(profile_name)
posts_takewhile = lambda p: p.date_utc.replace(tzinfo=timezone.utc) > last_scraped
posts_to_download = profile.get_posts()
self.posts_download_loop(posts_to_download, profile_name, fast_update, post_filter,
total_count=profile.mediacount, owner_profile=profile,
takewhile=posts_takewhile)
if latest_stamps is not None and posts_to_download.first_item is not None:
latest_stamps.set_last_post_timestamp(profile_name,
posts_to_download.first_item.date_local.astimezone())
if stories and profiles:
with self.context.error_catcher("Download stories"):
self.context.log("Downloading stories")
self.download_stories(userids=list(profiles), fast_update=fast_update, filename_target=None,
storyitem_filter=storyitem_filter, latest_stamps=latest_stamps)
def download_profile(self, profile_name: Union[str, Profile],
profile_pic: bool = True, profile_pic_only: bool = False,
fast_update: bool = False,
download_stories: bool = False, download_stories_only: bool = False,
download_tagged: bool = False, download_tagged_only: bool = False,
post_filter: Optional[Callable[[Post], bool]] = None,
storyitem_filter: Optional[Callable[[StoryItem], bool]] = None) -> None:
"""Download one profile
.. deprecated:: 4.1
Use :meth:`Instaloader.download_profiles`.
"""
# Get profile main page json
# check if profile does exist or name has changed since last download
# and update name and json data if necessary
if isinstance(profile_name, str):
profile = self.check_profile_id(profile_name.lower())
else:
profile = profile_name
profile_name = profile.username
# Save metadata as JSON if desired.
if self.save_metadata is not False:
json_filename = '{0}/{1}_{2}'.format(self.dirname_pattern.format(profile=profile_name, target=profile_name),
profile_name, profile.userid)
self.save_metadata_json(json_filename, profile)
if self.context.is_logged_in and profile.has_blocked_viewer and not profile.is_private:
# raising ProfileNotExistsException invokes "trying again anonymously" logic
raise ProfileNotExistsException("Profile {} has blocked you".format(profile_name))
# Download profile picture
if profile_pic or profile_pic_only:
with self.context.error_catcher('Download profile picture of {}'.format(profile_name)):
self.download_profilepic(profile)
if profile_pic_only:
return
# Catch some errors
if profile.is_private:
if not self.context.is_logged_in:
raise LoginRequiredException("profile %s requires login" % profile_name)
if not profile.followed_by_viewer and \
self.context.username != profile.username:
raise PrivateProfileNotFollowedException("Profile %s: private but not followed." % profile_name)
else:
if self.context.is_logged_in and not (download_stories or download_stories_only):
self.context.log("profile %s could also be downloaded anonymously." % profile_name)
# Download stories, if requested
if download_stories or download_stories_only:
if profile.has_viewable_story:
with self.context.error_catcher("Download stories of {}".format(profile_name)):
self.download_stories(userids=[profile.userid], filename_target=profile_name,
fast_update=fast_update, storyitem_filter=storyitem_filter)
else:
self.context.log("{} does not have any stories.".format(profile_name))
if download_stories_only:
return
# Download tagged, if requested
if download_tagged or download_tagged_only:
with self.context.error_catcher('Download tagged of {}'.format(profile_name)):
self.download_tagged(profile, fast_update=fast_update, post_filter=post_filter)
if download_tagged_only:
return
# Iterate over pictures and download them
self.context.log("Retrieving posts from profile {}.".format(profile_name))
self.posts_download_loop(profile.get_posts(), profile_name, fast_update, post_filter,
total_count=profile.mediacount, owner_profile=profile)
def interactive_login(self, username: str) -> None:
"""Logs in and internally stores session, asking user for password interactively.
:raises LoginRequiredException: when in quiet mode.
:raises InvalidArgumentException: If the provided username does not exist.
:raises ConnectionException: If connection to Instagram failed."""
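# Typical interactive flow (illustrative; save_session_to_file is assumed to be
# available on this class, as in the public Instaloader API):
#   L = Instaloader()
#   L.interactive_login("your_username")  # prompts for the password on stdin
#   L.save_session_to_file()              # optional: reuse the session later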
if self.context.quiet:
raise LoginRequiredException("Quiet mode requires given password or valid session file.")
try:
password = None
while password is None:
password = getpass.getpass(prompt="Enter Instagram password for %s: " % username)
try:
self.login(username, password)
except BadCredentialsException as err:
print(err, file=sys.stderr)
password = None
except TwoFactorAuthRequiredException:
while True:
try:
code = input("Enter 2FA verification code: ")
self.two_factor_login(code)
break
except BadCredentialsException as err:
print(err, file=sys.stderr)
pass
|
the-stack_0_21835 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_DEFAULT_CUDA_VERSION = '10'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_TENSORRT_VERSION = '5'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_TF_OPENCL_VERSION = '1.2'
_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15, 16, 17, 18]
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_WORKSPACE_ROOT = ''
_TF_BAZELRC = ''
_TF_CURRENT_BAZEL_VERSION = None
NCCL_LIB_PATHS = [
'lib64/', 'lib/powerpc64le-linux-gnu/', 'lib/x86_64-linux-gnu/', ''
]
# List of files to configure when building Bazel on Apple platforms.
APPLE_BAZEL_FILES = [
'tensorflow/lite/experimental/ios/BUILD',
'tensorflow/lite/experimental/objc/BUILD',
'tensorflow/lite/experimental/swift/BUILD'
]
# List of files to move when building for iOS.
IOS_FILES = [
'tensorflow/lite/experimental/objc/TensorFlowLiteObjC.podspec',
'tensorflow/lite/experimental/swift/TensorFlowLiteSwift.podspec',
]
class UserInputError(Exception):
pass
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return platform.system() == 'Linux'
def is_macos():
return platform.system() == 'Darwin'
def is_ppc64le():
return platform.machine() == 'ppc64le'
def is_cygwin():
return platform.system().startswith('CYGWIN_NT')
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
Args:
target: items to link to.
link_name: name of the link.
"""
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def sed_in_place(filename, old, new):
"""Replace old string with new string in file.
Args:
filename: string for filename.
old: string to replace.
new: new string to replace to.
"""
with open(filename, 'r') as f:
filedata = f.read()
newdata = filedata.replace(old, new)
with open(filename, 'w') as f:
f.write(newdata)
def write_to_bazelrc(line):
with open(_TF_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env %s="%s"' % (var_name, str(var)))
def run_shell(cmd, allow_non_zero=False):
if allow_non_zero:
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd)
return output.decode('UTF-8').strip()
def cygpath(path):
"""Convert path from posix to windows."""
return os.path.abspath(path).replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
library_paths = run_shell([
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
]).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'
])
]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'%s]: ') % default_python_bin_path
while True:
python_bin_path = get_from_env_or_user_or_default(environ_cp,
'PYTHON_BIN_PATH',
ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: %s cannot be found.' % python_bin_path)
else:
print('%s is not executable. Is it the python binary?' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
if is_windows() or is_cygwin():
python_bin_path = cygpath(python_bin_path)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [%s]\n' % python_lib_paths[0])
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
_ = get_python_major_version(python_bin_path)
# Convert python path to Windows style before writing into bazel.rc
if is_windows() or is_cygwin():
python_lib_path = cygpath(python_lib_path)
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path=\"%s"' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
# If the chosen python_lib_path is from a path specified in the PYTHONPATH
# variable, need to tell bazel to include PYTHONPATH
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
if python_lib_path in python_paths:
write_action_env_to_bazelrc('PYTHONPATH', environ_cp.get('PYTHONPATH'))
# Write tools/python_bin_path.sh
with open(
os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
'w') as f:
f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def reset_tf_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
These files could interfere with Bazel parsing.
"""
makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow',
'contrib', 'makefile', 'downloads')
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
if f.endswith('BUILD'):
os.remove(os.path.join(root, f))
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
query_item: string for feature related to the variable, e.g. "Hadoop File
System".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set, but it cannot be
interpreted as a boolean indicator, assume that the user has made a
scripting error, and will continue to provide invalid input.
Raise the error to avoid infinitely looping.
"""
if not question:
question = 'Do you wish to build TensorFlow with %s support?' % query_item
if not yes_reply:
yes_reply = '%s support will be enabled for TensorFlow.' % query_item
if not no_reply:
no_reply = 'No %s' % yes_reply
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' %
(var_name, ', '.join(true_strings), ', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: %s' % user_input_origin)
return var
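# Illustrative behaviour of get_var (hypothetical variable name TF_NEED_HDFS):
#   TF_NEED_HDFS=1 in the environment      -> returns True without prompting
#   TF_NEED_HDFS unset, user hits <Enter>  -> returns enabled_by_default
#   TF_NEED_HDFS=maybe                     -> raises UserInputError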
def set_build_var(environ_cp,
var_name,
query_item,
option_name,
enabled_by_default,
bazel_config_name=None):
"""Set if query_item will be enabled for the build.
Ask user if query_item will be enabled. Default is used if no input is given.
Set subprocess environment variable and write to .bazelrc if enabled.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
query_item: string for feature related to the variable, e.g. "Hadoop File
System".
option_name: string for option to define in .bazelrc.
enabled_by_default: boolean for default behavior.
bazel_config_name: Name for Bazel --config argument to enable build feature.
"""
var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
write_to_bazelrc('build --config=%s' % bazel_config_name)
elif bazel_config_name is not None:
# TODO(mikecase): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
def set_action_env_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
query_item: string for feature related to the variable, e.g. "Hadoop File
System".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
write_action_env_to_bazelrc(var_name, var)
environ_cp[var_name] = str(var)
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
# Treat "0.24" as "0.24.0"
if len(version_segments) == 2:
version_segments.append('0')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
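# Worked examples (derived from the padding logic above):
#   convert_version_to_int('0.24.1')     -> 24001    ('000' + '024' + '001')
#   convert_version_to_int('1.13.1-rc2') -> 1013001  (the '-rc2' suffix is dropped)
#   convert_version_to_int('0.24')       -> 24000    (treated as '0.24.0')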
def check_bazel_version(min_version, max_version):
"""Check installed bazel version is between min_version and max_version.
Args:
min_version: string for minimum bazel version.
max_version: string for maximum bazel version.
Returns:
The bazel version detected.
"""
if which('bazel') is None:
print('Cannot find bazel. Please install bazel.')
sys.exit(0)
curr_version = run_shell(
['bazel', '--batch', '--bazelrc=/dev/null', 'version'])
for line in curr_version.split('\n'):
if 'Build label: ' in line:
curr_version = line.split('Build label: ')[1]
break
min_version_int = convert_version_to_int(min_version)
curr_version_int = convert_version_to_int(curr_version)
max_version_int = convert_version_to_int(max_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
print('Make sure you are running at least bazel %s' % min_version)
return curr_version
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
print('Please upgrade your bazel installation to version %s or higher to '
'build TensorFlow!' % min_version)
sys.exit(1)
if (curr_version_int > max_version_int and
'TF_IGNORE_MAX_BAZEL_VERSION' not in os.environ):
print('Please downgrade your bazel installation to version %s or lower to '
'build TensorFlow! To downgrade: download the installer for the old '
'version (from https://github.com/bazelbuild/bazel/releases) then '
'run the installer.' % max_version)
sys.exit(1)
return curr_version
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
Also append CC optimization flags to bazel.rc.
Args:
environ_cp: copy of the os.environ.
"""
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
elif is_windows():
default_cc_opt_flags = '/arch:AVX'
else:
default_cc_opt_flags = '-march=native -Wno-sign-compare'
question = ('Please specify optimization flags to use during compilation when'
' bazel option "--config=opt" is specified [Default is %s]: '
) % default_cc_opt_flags
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build:opt --copt=%s' % opt)
# It should be safe on the same build host.
if not is_ppc64le() and not is_windows():
write_to_bazelrc('build:opt --host_copt=-march=native')
write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
Args:
environ_cp: copy of the os.environ.
"""
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
set_action_env_var(
environ_cp,
'TF_CUDA_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply)
def set_tf_download_clang(environ_cp):
"""Set TF_DOWNLOAD_CLANG action_env."""
question = 'Do you wish to download a fresh release of clang? (Experimental)'
yes_reply = 'Clang will be downloaded and used to compile tensorflow.'
no_reply = 'Clang will not be downloaded.'
set_action_env_var(
environ_cp,
'TF_DOWNLOAD_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply)
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
default_clang_path = which('clang') or ''
ask_clang_path = ('Please specify which clang should be used as device and '
'host compiler. [Default is %s]: ') % default_clang_path
while True:
clang_cuda_compiler_path = get_from_env_or_user_or_default(
environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
default_clang_path)
if os.path.exists(clang_cuda_compiler_path):
break
# Reset and retry
print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
# Set CLANG_CUDA_COMPILER_PATH
environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
UserInputError: if a query has been attempted n_ask_attempts times without
success, assume that the user has made a scripting error, and will
continue to provide invalid input. Raise the error to avoid infinitely
looping.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError('Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' %
(var_name, n_ask_attempts))
environ_cp[var_name] = val
return val
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' %
environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_HOME',
var_default=default_ndk_path,
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
'does not exist.'))
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
write_action_env_to_bazelrc(
'ANDROID_NDK_API_LEVEL',
get_ndk_api_level(environ_cp, android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
"""Set Android variables and write Android SDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'platforms')) and
os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_SDK_HOME',
var_default=default_sdk_path,
ask_for_var='Please specify the home path of the Android SDK to use.',
check_success=valid_sdk_path,
error_msg=('Either %s does not exist, or it does not contain the '
'subdirectories "platforms" and "build-tools".'))
platforms = os.path.join(android_sdk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_sdk_home_path, 'platforms',
'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_API_LEVEL',
var_default=api_levels[-1],
ask_for_var=('Please specify the Android SDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the SDK path.')
build_tools = os.path.join(android_sdk_home_path, 'build-tools')
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
return os.path.exists(
os.path.join(android_sdk_home_path, 'build-tools', version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_BUILD_TOOLS_VERSION',
var_default=versions[-1],
ask_for_var=('Please specify an Android build tools version to use. '
'[Available versions: %s]') % versions,
check_success=valid_build_tools,
error_msg=('The selected SDK does not have build-tools version %s '
'available.'))
write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
android_build_tools_version)
write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def get_ndk_api_level(environ_cp, android_ndk_home_path):
"""Gets the appropriate NDK API level to use for the provided Android NDK path."""
# First check to see if we're using a blessed version of the NDK.
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
with open(properties_path, 'r') as f:
filedata = f.read()
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
ndk_version = revision.group(1)
else:
raise Exception('Unable to parse NDK revision.')
if int(ndk_version) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
print('WARNING: The NDK version in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
'errors.\n' % (android_ndk_home_path, ndk_version,
_SUPPORTED_ANDROID_NDK_VERSIONS))
# Now grab the NDK API level to use. Note that this is different from the
# SDK API level, as the NDK API level is effectively the *min* target SDK
# version.
platforms = os.path.join(android_ndk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [
x.replace('android-', '') for x in api_levels if 'android-' in x
]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_ndk_home_path, 'platforms',
'android-' + api_level))
android_ndk_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_API_LEVEL',
var_default='18', # 18 is required for GPU acceleration.
ask_for_var=('Please specify the (min) Android NDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the NDK path.')
return android_ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = which('gcc') or ''
cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
if os.path.islink(cuda_bin_symlink):
# os.readlink is only available in linux
default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var='Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
"""Reformat the version string to have the given number of sequences.
For example:
Given (7, 2) -> 7.0
(7.0.1, 2) -> 7.0
(5, 1) -> 5
(5.0.3.2, 1) -> 5
Args:
version_str: String, the version string.
sequence_count: int, an integer.
Returns:
string, reformatted version string.
"""
v = version_str.split('.')
if len(v) < sequence_count:
v = v + (['0'] * (sequence_count - len(v)))
return '.'.join(v[:sequence_count])
def set_tf_cuda_paths(environ_cp):
"""Set TF_CUDA_PATHS."""
ask_cuda_paths = (
'Please specify the comma-separated list of base paths to look for CUDA '
'libraries and headers. [Leave empty to use the default]: ')
tf_cuda_paths = get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_PATHS',
ask_cuda_paths, '')
if tf_cuda_paths:
environ_cp['TF_CUDA_PATHS'] = tf_cuda_paths
def set_tf_cuda_version(environ_cp):
"""Set TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use. '
'[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
tf_cuda_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDA_VERSION',
ask_cuda_version,
_DEFAULT_CUDA_VERSION)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
def set_tf_cudnn_version(environ_cp):
"""Set TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s]: ') % _DEFAULT_CUDNN_VERSION
tf_cudnn_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDNN_VERSION',
ask_cudnn_version,
_DEFAULT_CUDNN_VERSION)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
cudnn = None
cudart = None
cudnn_ok = True # assume no cudnn dependency by default
cuda_ok = True # assume no cuda dependency by default
for line in ldd_out:
if 'libcudnn.so' in line:
cudnn = cudnn_pattern.search(line)
cudnn_ok = False
elif 'libcudart.so' in line:
cudart = cuda_pattern.search(line)
cuda_ok = False
if cudnn and len(cudnn.group(1)):
cudnn = convert_version_to_int(cudnn.group(1))
if cudart and len(cudart.group(1)):
cudart = convert_version_to_int(cudart.group(1))
if cudnn is not None:
cudnn_ok = (cudnn == cudnn_ver)
if cudart is not None:
cuda_ok = (cudart == cuda_ver)
return cudnn_ok and cuda_ok
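# Example of the kind of ldd output line this parses (values are illustrative):
#   "libcudart.so.10.0 => /usr/local/cuda-10.0/lib64/libcudart.so.10.0 (...)"
# is_cuda_compatible() returns True only when every CUDA/cuDNN dependency it
# finds matches the requested cuda_ver / cudnn_ver integers.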
def set_tf_tensorrt_version(environ_cp):
"""Set TF_TENSORRT_VERSION."""
if not is_linux():
raise ValueError('Currently TensorRT is only supported on Linux platform.')
if not int(environ_cp.get('TF_NEED_TENSORRT', False)):
return
ask_tensorrt_version = (
'Please specify the TensorRT version you want to use. '
'[Leave empty to default to TensorRT %s]: ') % _DEFAULT_TENSORRT_VERSION
tf_tensorrt_version = get_from_env_or_user_or_default(
environ_cp, 'TF_TENSORRT_VERSION', ask_tensorrt_version,
_DEFAULT_TENSORRT_VERSION)
environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
def set_tf_nccl_version(environ_cp):
"""Set TF_NCCL_VERSION."""
if not is_linux():
raise ValueError('Currently NCCL is only supported on Linux platform.')
if 'TF_NCCL_VERSION' in environ_cp:
return
ask_nccl_version = (
'Please specify the locally installed NCCL version you want to use. '
'[Leave empty to use http://github.com/nvidia/nccl]: ')
tf_nccl_version = get_from_env_or_user_or_default(environ_cp,
'TF_NCCL_VERSION',
ask_nccl_version, '')
environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
Args:
environ_cp: copy of the os.environ.
Returns:
string of native cuda compute capabilities, separated by comma.
"""
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
pattern = re.compile('[0-9]*\\.[0-9]*')
output = [pattern.search(x) for x in output if 'Capability' in x]
output = ','.join(x.group() for x in output if x is not None)
except subprocess.CalledProcessError:
output = ''
else:
output = ''
return output
def set_tf_cuda_compute_capabilities(environ_cp):
"""Set TF_CUDA_COMPUTE_CAPABILITIES."""
while True:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if not native_cuda_compute_capabilities:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
else:
default_cuda_compute_capabilities = native_cuda_compute_capabilities
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated '
'CUDA compute capabilities you want to '
'build with.\nYou can find the compute '
'capability of your device at: '
'https://developer.nvidia.com/cuda-gpus.\nPlease'
' note that each additional compute '
'capability significantly increases your '
'build time and binary size, and that '
'TensorFlow only supports compute '
'capabilities >= 3.5 [Default is: %s]: ' %
default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
# Check whether all capabilities from the input is valid
all_valid = True
# Remove all whitespace characters before splitting the string
# that users may insert by accident, as this will result in error
tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
for compute_capability in tf_cuda_compute_capabilities.split(','):
m = re.match('[0-9]+.[0-9]+', compute_capability)
if not m:
print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = float(m.group(0))
if ver < 3.0:
print('ERROR: TensorFlow only supports CUDA compute capabilities 3.0 '
'and higher. Please re-specify the list of compute '
'capabilities excluding version %s.' % ver)
all_valid = False
if ver < 3.5:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than 3.5. Disable XLA when running on older GPUs.')
if all_valid:
break
# Reset and Retry
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
# If CUDA is enabled, always use GPU during build and test.
if environ_cp.get('TF_CUDA_CLANG') == '1':
write_to_bazelrc('build --config=cuda_clang')
write_to_bazelrc('test --config=cuda_clang')
else:
write_to_bazelrc('build --config=cuda')
write_to_bazelrc('test --config=cuda')
def set_host_cxx_compiler(environ_cp):
"""Set HOST_CXX_COMPILER."""
default_cxx_host_compiler = which('g++') or ''
host_cxx_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_CXX_COMPILER',
var_default=default_cxx_host_compiler,
ask_for_var=('Please specify which C++ compiler should be used as the '
'host C++ compiler.'),
check_success=os.path.exists,
error_msg='Invalid C++ compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler)
def set_host_c_compiler(environ_cp):
"""Set HOST_C_COMPILER."""
default_c_host_compiler = which('gcc') or ''
host_c_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_C_COMPILER',
var_default=default_c_host_compiler,
ask_for_var=('Please specify which C compiler should be used as the host '
'C compiler.'),
check_success=os.path.exists,
error_msg='Invalid C compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler)
def set_computecpp_toolkit_path(environ_cp):
"""Set COMPUTECPP_TOOLKIT_PATH."""
def toolkit_exists(toolkit_path):
"""Check if a computecpp toolkit path is valid."""
if is_linux():
sycl_rt_lib_path = 'lib/libComputeCpp.so'
else:
sycl_rt_lib_path = ''
sycl_rt_lib_path_full = os.path.join(toolkit_path, sycl_rt_lib_path)
exists = os.path.exists(sycl_rt_lib_path_full)
if not exists:
print('Invalid SYCL %s library path. %s cannot be found' %
(_TF_OPENCL_VERSION, sycl_rt_lib_path_full))
return exists
computecpp_toolkit_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='COMPUTECPP_TOOLKIT_PATH',
var_default=_DEFAULT_COMPUTECPP_TOOLKIT_PATH,
ask_for_var=(
'Please specify the location where ComputeCpp for SYCL %s is '
'installed.' % _TF_OPENCL_VERSION),
check_success=toolkit_exists,
error_msg='Invalid SYCL compiler path. %s cannot be found.',
suppress_default_error=True)
write_action_env_to_bazelrc('COMPUTECPP_TOOLKIT_PATH',
computecpp_toolkit_path)
def set_trisycl_include_dir(environ_cp):
"""Set TRISYCL_INCLUDE_DIR."""
ask_trisycl_include_dir = ('Please specify the location of the triSYCL '
'include directory. (Use --config=sycl_trisycl '
'when building with Bazel) '
'[Default is %s]: ') % (
_DEFAULT_TRISYCL_INCLUDE_DIR)
while True:
trisycl_include_dir = get_from_env_or_user_or_default(
environ_cp, 'TRISYCL_INCLUDE_DIR', ask_trisycl_include_dir,
_DEFAULT_TRISYCL_INCLUDE_DIR)
if os.path.exists(trisycl_include_dir):
break
print('Invalid triSYCL include directory, %s cannot be found' %
(trisycl_include_dir))
# Set TRISYCL_INCLUDE_DIR
environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir
write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir)
def set_mpi_home(environ_cp):
"""Set MPI_HOME."""
default_mpi_home = which('mpirun') or which('mpiexec') or ''
default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home))
def valid_mpi_path(mpi_home):
exists = (
os.path.exists(os.path.join(mpi_home, 'include')) and
(os.path.exists(os.path.join(mpi_home, 'lib')) or
os.path.exists(os.path.join(mpi_home, 'lib64')) or
os.path.exists(os.path.join(mpi_home, 'lib32'))))
if not exists:
print(
'Invalid path to the MPI Toolkit. %s or %s or %s or %s cannot be found'
% (os.path.join(mpi_home, 'include'),
os.path.join(mpi_home, 'lib'),
os.path.join(mpi_home, 'lib64'),
os.path.join(mpi_home, 'lib32')))
return exists
_ = prompt_loop_or_load_from_env(
environ_cp,
var_name='MPI_HOME',
var_default=default_mpi_home,
ask_for_var='Please specify the MPI toolkit folder.',
check_success=valid_mpi_path,
error_msg='',
suppress_default_error=True)
def set_other_mpi_vars(environ_cp):
"""Set other MPI related variables."""
# Link the MPI header files
mpi_home = environ_cp.get('MPI_HOME')
symlink_force('%s/include/mpi.h' % mpi_home, 'third_party/mpi/mpi.h')
# Determine whether we use OpenMPI or MVAPICH; these require different header
# files to be included here to keep the bazel dependency checker happy
if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
symlink_force(
os.path.join(mpi_home, 'include/mpi_portable_platform.h'),
'third_party/mpi/mpi_portable_platform.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=False',
'MPI_LIB_IS_OPENMPI=True')
else:
# MVAPICH / MPICH
symlink_force(
os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h')
symlink_force(
os.path.join(mpi_home, 'include/mpicxx.h'), 'third_party/mpi/mpicxx.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=True',
'MPI_LIB_IS_OPENMPI=False')
if os.path.exists(os.path.join(mpi_home, 'lib/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib/libmpi.so'), 'third_party/mpi/libmpi.so')
elif os.path.exists(os.path.join(mpi_home, 'lib64/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib64/libmpi.so'), 'third_party/mpi/libmpi.so')
elif os.path.exists(os.path.join(mpi_home, 'lib32/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib32/libmpi.so'), 'third_party/mpi/libmpi.so')
else:
raise ValueError(
'Cannot find the MPI library file in %s/lib or %s/lib64 or %s/lib32' %
(mpi_home, mpi_home, mpi_home))
def system_specific_test_config(env):
"""Add default build and test flags required for TF tests to bazelrc."""
write_to_bazelrc('test --flaky_test_attempts=3')
write_to_bazelrc('test --test_size_filters=small,medium')
write_to_bazelrc(
'test --test_tag_filters=-benchmark-test,-no_oss,-oss_serial')
write_to_bazelrc('test --build_tag_filters=-benchmark-test,-no_oss')
if is_windows():
if env.get('TF_NEED_CUDA', None) == '1':
write_to_bazelrc(
'test --test_tag_filters=-no_windows,-no_windows_gpu,-no_gpu')
write_to_bazelrc(
'test --build_tag_filters=-no_windows,-no_windows_gpu,-no_gpu')
else:
write_to_bazelrc('test --test_tag_filters=-no_windows,-gpu')
write_to_bazelrc('test --build_tag_filters=-no_windows,-gpu')
elif is_macos():
write_to_bazelrc('test --test_tag_filters=-gpu,-nomac,-no_mac')
write_to_bazelrc('test --build_tag_filters=-gpu,-nomac,-no_mac')
elif is_linux():
if env.get('TF_NEED_CUDA', None) == '1':
write_to_bazelrc('test --test_tag_filters=-no_gpu')
write_to_bazelrc('test --build_tag_filters=-no_gpu')
write_to_bazelrc('test --test_env=LD_LIBRARY_PATH')
else:
write_to_bazelrc('test --test_tag_filters=-gpu')
write_to_bazelrc('test --build_tag_filters=-gpu')
def set_system_libs_flag(environ_cp):
syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
if syslibs:
if ',' in syslibs:
syslibs = ','.join(sorted(syslibs.split(',')))
else:
syslibs = ','.join(sorted(syslibs.split()))
write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)
if 'PREFIX' in environ_cp:
write_to_bazelrc('build --define=PREFIX=%s' % environ_cp['PREFIX'])
if 'LIBDIR' in environ_cp:
write_to_bazelrc('build --define=LIBDIR=%s' % environ_cp['LIBDIR'])
if 'INCLUDEDIR' in environ_cp:
write_to_bazelrc('build --define=INCLUDEDIR=%s' % environ_cp['INCLUDEDIR'])
def set_windows_build_flags(environ_cp):
"""Set Windows specific build options."""
# The non-monolithic build is not supported yet
write_to_bazelrc('build --config monolithic')
# Suppress warning messages
write_to_bazelrc('build --copt=-w --host_copt=-w')
# Fix winsock2.h conflicts
write_to_bazelrc(
'build --copt=-DWIN32_LEAN_AND_MEAN --host_copt=-DWIN32_LEAN_AND_MEAN')
# Output more verbose information when something goes wrong
write_to_bazelrc('build --verbose_failures')
# The host and target platforms are the same in the Windows build, so we don't
# have to distinguish them. This avoids building the same targets twice.
write_to_bazelrc('build --distinct_host_configuration=false')
if get_var(
environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
True, ('Would you like to override eigen strong inline for some C++ '
'compilation to reduce the compilation time?'),
'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
'some compilations could take more than 20 mins.'):
# Due to a known MSVC compiler issue
# https://github.com/tensorflow/tensorflow/issues/10521
# Overriding eigen strong inline speeds up the compiling of
# conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
# but this also hurts the performance. Let users decide what they want.
write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
"""Helper function to print formatted help text for Bazel config options."""
print('\t--config=%-12s\t# %s' % (name, help_text))
def configure_ios():
"""Configures TensorFlow for iOS builds.
This function will only be executed if `is_macos()` is true.
"""
if not is_macos():
return
if _TF_CURRENT_BAZEL_VERSION is None or _TF_CURRENT_BAZEL_VERSION < 23000:
print(
'Building Bazel rules on Apple platforms requires Bazel 0.23 or later.')
for filepath in APPLE_BAZEL_FILES:
existing_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath + '.apple')
renamed_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath)
symlink_force(existing_filepath, renamed_filepath)
for filepath in IOS_FILES:
filename = os.path.basename(filepath)
new_filepath = os.path.join(_TF_WORKSPACE_ROOT, filename)
symlink_force(filepath, new_filepath)
def validate_cuda_config(environ_cp):
"""Run find_cuda_config.py and return cuda_toolkit_path, or None."""
def maybe_encode_env(env):
"""Encodes unicode in env to str on Windows python 2.x."""
if not is_windows() or sys.version_info[0] != 2:
return env
for k, v in env.items():
if isinstance(k, unicode):
k = k.encode('ascii')
if isinstance(v, unicode):
v = v.encode('ascii')
env[k] = v
return env
cuda_libraries = ['cuda', 'cudnn']
if is_linux():
if 'TF_TENSORRT_VERSION' in environ_cp: # if env variable exists
cuda_libraries.append('tensorrt')
if environ_cp.get('TF_NCCL_VERSION', None): # if env variable not empty
cuda_libraries.append('nccl')
proc = subprocess.Popen(
[environ_cp['PYTHON_BIN_PATH'], 'third_party/gpus/find_cuda_config.py'] +
cuda_libraries,
stdout=subprocess.PIPE,
env=maybe_encode_env(environ_cp))
if proc.wait():
# Errors from find_cuda_config.py were sent to stderr.
print('Asking for detailed CUDA configuration...\n')
return False
config = dict(
tuple(line.decode('ascii').rstrip().split(': ')) for line in proc.stdout)
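# The helper script prints one "key: value" pair per line, e.g. (illustrative):
#   cuda_version: 10.0
#   cuda_toolkit_path: /usr/local/cuda
# which the dict() comprehension above turns into the config mapping.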
print('Found CUDA %s in:' % config['cuda_version'])
print(' %s' % config['cuda_library_dir'])
print(' %s' % config['cuda_include_dir'])
print('Found cuDNN %s in:' % config['cudnn_version'])
print(' %s' % config['cudnn_library_dir'])
print(' %s' % config['cudnn_include_dir'])
if 'tensorrt_version' in config:
print('Found TensorRT %s in:' % config['tensorrt_version'])
print(' %s' % config['tensorrt_library_dir'])
print(' %s' % config['tensorrt_include_dir'])
if config.get('nccl_version', None):
print('Found NCCL %s in:' % config['nccl_version'])
print(' %s' % config['nccl_library_dir'])
print(' %s' % config['nccl_include_dir'])
print('\n')
environ_cp['CUDA_TOOLKIT_PATH'] = config['cuda_toolkit_path']
return True
def main():
global _TF_WORKSPACE_ROOT
global _TF_BAZELRC
global _TF_CURRENT_BAZEL_VERSION
parser = argparse.ArgumentParser()
parser.add_argument(
'--workspace',
type=str,
default=os.path.abspath(os.path.dirname(__file__)),
help='The absolute path to your active Bazel workspace.')
args = parser.parse_args()
_TF_WORKSPACE_ROOT = args.workspace
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
# Make a copy of os.environ so that functions that get and set environment
# variables operate on the copy rather than on os.environ directly.
environ_cp = dict(os.environ)
current_bazel_version = check_bazel_version('0.22.0', '0.24.1')
_TF_CURRENT_BAZEL_VERSION = convert_version_to_int(current_bazel_version)
reset_tf_configure_bazelrc()
cleanup_makefile()
setup_python(environ_cp)
if is_windows():
environ_cp['TF_NEED_OPENCL_SYCL'] = '0'
environ_cp['TF_NEED_COMPUTECPP'] = '0'
environ_cp['TF_NEED_OPENCL'] = '0'
environ_cp['TF_CUDA_CLANG'] = '0'
environ_cp['TF_NEED_TENSORRT'] = '0'
# TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
# Windows.
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
environ_cp['TF_NEED_MPI'] = '0'
environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'
if is_macos():
environ_cp['TF_NEED_TENSORRT'] = '0'
else:
environ_cp['TF_CONFIGURE_IOS'] = '0'
# The numpy package on ppc64le uses OpenBLAS which has multi-threading
# issues that lead to incorrect answers. Set OMP_NUM_THREADS=1 at
# runtime to allow the Tensorflow testcases which compare numpy
# results to Tensorflow results to succeed.
if is_ppc64le():
write_action_env_to_bazelrc('OMP_NUM_THREADS', 1)
xla_enabled_by_default = is_linux()
set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
xla_enabled_by_default, 'xla')
set_action_env_var(environ_cp, 'TF_NEED_OPENCL_SYCL', 'OpenCL SYCL', False)
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
set_host_cxx_compiler(environ_cp)
set_host_c_compiler(environ_cp)
set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True)
if environ_cp.get('TF_NEED_COMPUTECPP') == '1':
set_computecpp_toolkit_path(environ_cp)
else:
set_trisycl_include_dir(environ_cp)
set_action_env_var(environ_cp, 'TF_NEED_ROCM', 'ROCm', False)
if (environ_cp.get('TF_NEED_ROCM') == '1' and
'LD_LIBRARY_PATH' in environ_cp and
environ_cp.get('LD_LIBRARY_PATH') != '1'):
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
set_action_env_var(environ_cp, 'TF_NEED_TENSORRT', 'TensorRT', False)
environ_save = dict(environ_cp)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
if validate_cuda_config(environ_cp):
cuda_env_names = [
'TF_CUDA_VERSION', 'TF_CUBLAS_VERSION', 'TF_CUDNN_VERSION',
'TF_TENSORRT_VERSION', 'TF_NCCL_VERSION', 'TF_CUDA_PATHS',
'CUDA_TOOLKIT_PATH'
]
for name in cuda_env_names:
if name in environ_cp:
write_action_env_to_bazelrc(name, environ_cp[name])
break
# Restore settings changed below if CUDA config could not be validated.
environ_cp = dict(environ_save)
set_tf_cuda_version(environ_cp)
set_tf_cudnn_version(environ_cp)
if is_linux():
set_tf_tensorrt_version(environ_cp)
set_tf_nccl_version(environ_cp)
set_tf_cuda_paths(environ_cp)
else:
raise UserInputError(
'Invalid CUDA settings were provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
set_tf_cuda_compute_capabilities(environ_cp)
if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
'LD_LIBRARY_PATH') != '1':
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_tf_cuda_clang(environ_cp)
if environ_cp.get('TF_CUDA_CLANG') == '1':
# Ask whether we should download the clang toolchain.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
# Set up which clang we should use as the cuda / host compiler.
set_clang_cuda_compiler_path(environ_cp)
else:
# Use downloaded LLD for linking.
write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
write_to_bazelrc('test:cuda_clang --config=download_clang_use_lld')
else:
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
if not is_windows():
set_gcc_host_compiler_path(environ_cp)
set_other_cuda_vars(environ_cp)
else:
# CUDA not required. Ask whether we should download the clang toolchain and
# use it for the CPU build.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') == '1':
write_to_bazelrc('build --config=download_clang')
write_to_bazelrc('test --config=download_clang')
# SYCL / ROCm / CUDA are mutually exclusive.
# At most 1 GPU platform can be configured.
gpu_platform_count = 0
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_ROCM') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_CUDA') == '1':
gpu_platform_count += 1
if gpu_platform_count >= 2:
raise UserInputError('SYCL / CUDA / ROCm are mutually exclusive. '
'At most 1 GPU platform can be configured.')
set_build_var(environ_cp, 'TF_NEED_MPI', 'MPI', 'with_mpi_support', False)
if environ_cp.get('TF_NEED_MPI') == '1':
set_mpi_home(environ_cp)
set_other_mpi_vars(environ_cp)
set_cc_opt_flags(environ_cp)
set_system_libs_flag(environ_cp)
if is_windows():
set_windows_build_flags(environ_cp)
# Add a config option to build TensorFlow 2.0 API.
write_to_bazelrc('build:v2 --define=tf_api_version=2')
if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
('Would you like to interactively configure ./WORKSPACE for '
'Android builds?'), 'Searching for NDK and SDK installations.',
'Not configuring the WORKSPACE for Android builds.'):
create_android_ndk_rule(environ_cp)
create_android_sdk_rule(environ_cp)
system_specific_test_config(os.environ)
set_action_env_var(environ_cp, 'TF_CONFIGURE_IOS', 'iOS', False)
if environ_cp.get('TF_CONFIGURE_IOS') == '1':
configure_ios()
else:
# TODO(pcloudy): Remove BAZEL_USE_CPP_ONLY_TOOLCHAIN after Bazel is upgraded
# to 0.24.0.
# For working around https://github.com/bazelbuild/bazel/issues/7607
if is_macos():
write_to_bazelrc('build --action_env=BAZEL_USE_CPP_ONLY_TOOLCHAIN=1')
print('Preconfigured Bazel build configs. You can use any of the below by '
'adding "--config=<>" to your build command. See .bazelrc for more '
'details.')
config_info_line('mkl', 'Build with MKL support.')
config_info_line('monolithic', 'Config for mostly static monolithic build.')
config_info_line('gdr', 'Build with GDR support.')
config_info_line('verbs', 'Build with libverbs support.')
config_info_line('ngraph', 'Build with Intel nGraph support.')
config_info_line('numa', 'Build with NUMA support.')
config_info_line(
'dynamic_kernels',
'(Experimental) Build kernels into separate shared objects.')
print('Preconfigured Bazel build configs to DISABLE default on features:')
config_info_line('noaws', 'Disable AWS S3 filesystem support.')
config_info_line('nogcp', 'Disable GCP support.')
config_info_line('nohdfs', 'Disable HDFS support.')
config_info_line('noignite', 'Disable Apache Ignite support.')
config_info_line('nokafka', 'Disable Apache Kafka support.')
config_info_line('nonccl', 'Disable NVIDIA NCCL support.')
if __name__ == '__main__':
main()
|
the-stack_0_21836 | from typing import Optional
from sharpy.plans.acts import ActBase
from sc2 import UnitTypeId, AbilityId
from sc2.position import Point2
from sc2.unit import Unit
from sc2.units import Units
from sharpy.managers.roles import UnitTask
HALLUCINATION_ENERGY_COST = 75
class HallucinatedPhoenixScout(ActBase):
"""
Creates hallucinated phoenixes with sentries and uses them as scouts.
time_interval is seconds.
"""
def __init__(self, time_interval: int = 60):
super().__init__()
self.time_interval: int = time_interval
# When we last created a hallucinated phoenix scout
self.last_created: int = -1
self.last_target: Optional[Point2] = None
self.current_phoenix_tag: Optional[int] = None
async def execute(self) -> bool:
phoenix = await self.get_hallucinated_phoenix()
if phoenix:
self.move_phoenix(phoenix)
if not phoenix and self.should_send_scout:
# We should have a Phoenix on the next iteration
self.create_hallucinated_phoenix()
return True # Non blocking
async def get_hallucinated_phoenix(self) -> Optional[Unit]:
if self.current_phoenix_tag is not None:
phoenix = self.knowledge.roles.units(UnitTask.Scouting).find_by_tag(self.current_phoenix_tag)
if phoenix is not None:
return phoenix
# Phoenix does not exist anymore
self.current_phoenix_tag = None
phoenixes = self.knowledge.roles.units(UnitTask.Hallucination)(UnitTypeId.PHOENIX)
if phoenixes.exists:
phoenix = phoenixes[0]
self.current_phoenix_tag = phoenix.tag
self.knowledge.roles.set_task(UnitTask.Scouting, phoenix)
return phoenix
return None
def create_hallucinated_phoenix(self):
sentries: Units = self.cache.own(UnitTypeId.SENTRY)
if not sentries.exists:
return
another_sentry_with_energy_exists = False
for sentry in sentries:
# we don't want to spend all of the sentry's energy on hallucinations
if sentry.energy > HALLUCINATION_ENERGY_COST + 50 or (another_sentry_with_energy_exists and sentry.energy > HALLUCINATION_ENERGY_COST):
if self.knowledge.known_enemy_units_mobile.closer_than(15, sentry):
# Don't make hallucinated phoenixes while enemies are near the sentry
continue
# todo: should reserve a sentry for this purpose or at least reserve most of its energy for this.
# self.knowledge.add_reserved_unit(sentry.tag)
self.do(sentry(AbilityId.HALLUCINATION_PHOENIX))
self.last_created = self.knowledge.ai.time
return
elif sentry.energy > 50: # force field
another_sentry_with_energy_exists = True
@property
def should_send_scout(self) -> bool:
if self.knowledge.possible_rush_detected and self.ai.time < 5 * 60:
return False # no scout in first 5 min if rush incoming
return self.last_created + self.time_interval < self.knowledge.ai.time
def move_phoenix(self, phoenix: Unit):
target = self.select_target()
self.do(phoenix.move(target))
if target != self.last_target:
self.last_target = target
self.print(f"scouting {target}, interval {self.time_interval}")
def select_target(self) -> Point2:
# todo: there just might be a linear function here...
if self.ai.time < 6 * 60:
targets = self.knowledge.enemy_expansion_zones[0:3]
elif self.ai.time < 8 * 60:
targets = self.knowledge.enemy_expansion_zones[0:4]
elif self.ai.time < 10 * 60:
targets = self.knowledge.enemy_expansion_zones[0:5]
else:
# This includes our bases as well (sorted to the end), but the hallucination
# won't live long enough to scout all bases.
targets = self.knowledge.enemy_expansion_zones
targets.sort(key=lambda z: z.last_scouted_mineral_line)
if len(targets) > 0:
return targets[0].mineral_line_center
return self.knowledge.enemy_main_zone.mineral_line_center
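# Minimal wiring sketch (hypothetical; assumes the usual sharpy setup in which
# ActBase subclasses are executed every iteration as part of the bot's plan):
#   scout = HallucinatedPhoenixScout(time_interval=45)
#   # ... append `scout` to the bot's build order / plan list ...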
|
the-stack_0_21837 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 19 16:16:58 2021
@author: tmlab
"""
if __name__ == '__main__':
import os
import pickle
from copy import copy
from gensim.corpora import Dictionary
import PySimpleGUI as sg
directory = os.path.dirname(os.path.abspath(__file__))
directory = directory.replace("\\", "/")  # normalize Windows path separators
os.chdir(directory)
#%% phase 1. data load
print('phase 1. loading and preprocessing data')
import data_preprocessing
with open( directory+ '/input/DT_211118.pkl', 'rb') as fr :
data = pickle.load(fr)
data_sample = copy(data)
data_sample = data_preprocessing.initialize(data_sample)
data_sample = data_preprocessing.filter_by_year(data_sample)
data_sample = data_preprocessing.filter_by_textsize(data_sample)
data_sample = data_preprocessing.preprocess_text(data_sample, directory)
#%% phase 2. embedding
import embedding
import CPC
print('phase 2. loading CPC def and embedding')
try :
# CPC embedding
with open( directory+ '/input/CPC_subclass_def.pkl', 'rb') as fr :
CPC_definition = pickle.load(fr)
except :
with open( 'D:/OneDrive - 아주대학교/db/patent/CPC/CPC_subclass_def.pkl', 'rb') as fr :
CPC_definition = pickle.load(fr)
model = embedding.model
CPC_dict = CPC.generate_CPC_dict(data_sample)
CPC_dict_filtered = CPC.filter_CPC_dict(data_sample, CPC_dict, CPC_definition)
encoded_CPC = embedding.CPC_embedding(model, CPC_definition, CPC_dict_filtered)
texts = data_sample['TAC_keyword']
# document embedding, ready to LDA
keyword_dct = Dictionary(texts)
keyword_dct.filter_extremes(no_below = 10, no_above = 0.1)
keyword_list = keyword_dct.token2id.keys()
corpus = [keyword_dct.doc2bow(text) for text in texts]
# encoded_keyword = embedding.keyword_embedding(keyword_list)
texts = [[k for k in doc if k in keyword_list] for doc in texts]
docs = [" ".join(i) for i in texts]
encoded_docs = embedding.docs_embedding(model,docs)
encoded_CPC = embedding.CPC_embedding(model, CPC_definition, CPC_dict_filtered)
#%% phase 3. LDA and embedding
import LDA
print('phase 3. LDA and embedding')
try :
LDA_parameter = {}
        LDA_parameter['N_topics'] = int(input("Enter the number of topics: "))
        LDA_parameter['Alpha'] = float(input("Enter the Alpha parameter: "))
        LDA_parameter['Beta'] = float(input("Enter the Beta parameter: "))
# 30, 0.5, 0.1
lda_model = LDA.lda_model(corpus, keyword_dct,
LDA_parameter['N_topics'],
LDA_parameter['Alpha'],
LDA_parameter['Beta'])
except :
lda_model = LDA.lda_model(corpus, keyword_dct,
30,
0.5,
0.1)
topic_doc_df = LDA.get_topic_doc(lda_model, corpus)
encoded_topic = LDA.get_encoded_topic(topic_doc_df, encoded_docs)
print(encoded_topic)
#%% phase 4. LDA result handling
import LDA
import pandas as pd
print('phase 4. Calculate LDA2CPC ')
topic_word_df = LDA.get_topic_word_matrix(lda_model)
CPC_topic_matrix = LDA.get_CPC_topic_matrix(encoded_CPC, encoded_topic)
topic_year_df = LDA.get_topic_vol_year(lda_model, topic_doc_df, data_sample)
# standard = 0.55
# classified_topics = LDA.classifying_topic(CPC_topic_matrix, standard)
volumn_dict = LDA.get_topic_vol(lda_model, corpus)
CAGR_dict = LDA.get_topic_CAGR(topic_year_df)
Novelty_dict = LDA.get_topic_novelty(CPC_topic_matrix)
CPC_match_dict = LDA.get_topic2CPC(CPC_topic_matrix)
total_df = pd.DataFrame([volumn_dict, CAGR_dict, Novelty_dict, CPC_match_dict]).transpose()
total_df.columns = ['Volumn', 'CAGR', 'Novelty', 'CPC-match']
print(total_df)
topic2doc_title = LDA.get_most_similar_doc2topic(data_sample, topic_doc_df)
import xlsxwriter
import pandas as pd
# directory = 'C:/Users/tmlab/Desktop/작업공간/'
writer = pd.ExcelWriter('./output/LDA_results.xlsx',
engine='xlsxwriter')
topic_word_df.to_excel(writer , sheet_name = 'topic_word', index = 1)
pd.DataFrame(topic_doc_df).to_excel(writer , sheet_name = 'topic_doc', index = 1)
topic_year_df.to_excel(writer , sheet_name = 'topic_year', index = 1)
topic2doc_title.to_excel(writer , sheet_name = 'topic_doc_title', index = 1)
CPC_topic_matrix.to_excel(writer , sheet_name = 'topic2CPC', index = 1)
total_df.to_excel(writer , sheet_name = 'topic_stats', index = 1)
writer.save()
writer.close()
#%% phase 5. CPC visualization
import Visualization
Visualization.pchart_CPC_topic(CPC_topic_matrix, [0,1,2,3])
Visualization.heatmap_CPC_topic(CPC_topic_matrix)
Visualization.portfolio_CPC_topic(Novelty_dict, CAGR_dict, volumn_dict, CPC_topic_matrix, CPC_match_dict)
#%%
# #%% test
# import LDA
# import numpy as np
# import matplotlib.pyplot as plt
# # CPC_topic_matrix.apply()
    # standard = np.percentile(CPC_topic_matrix.min(), 90) # roughly 0.9
# standard = 0.45
# classified_topics = LDA.classifying_topic(CPC_topic_matrix, standard)
# novel_topics = [k for k,v in classified_topics.items() if v== 'Novel']
# temp = topic_word_df[novel_topics]
    # # all values / nearest (min) values
# # plt.hist(CPC_topic_matrix.to_numpy().flatten(), bins=100)
# # plt.hist(CPC_topic_matrix.min().to_numpy().flatten(), bins= 10)
# #%% phase 3. genearte sim matrix
# import pandas as pd
# import embedding
# standard = {}
# # standard['class'] = np.percentile(class_matrix, 95)
# # standard['subclass'] = np.percentile(subclass_matrix, 95)
# # standard['group'] = np.percentile(group_matrix, 95)
# # class_matrix_ = class_matrix.applymap(lambda x : 1 if x > standard['class'] else 0)
# # subclass_matrix_ = subclass_matrix.applymap(lambda x : 1 if x > standard['subclass'] else 0)
# # group_matrix_ = group_matrix.applymap(lambda x : 1 if x > standard['group'] else 0)
# #%%
# import embedding
# word_cls_df = pd.DataFrame()
# for matrix in [class_matrix_, subclass_matrix_, group_matrix_] :
# DICT = embedding.classify_keyword(matrix)
# word_cls_df = word_cls_df.append(DICT, ignore_index=1)
# word_cls_df = word_cls_df.transpose()
# word_cls_df.columns = ['class', 'subclass' , 'group']
#%% phase 4. classifying keyword
#%% phase A-1. LDA tunning and modelling
# import LDA
# import pandas as pd
# import matplotlib.pyplot as plt
# import numpy as np
# if os.path.isfile(directory + '/lda_tuning_results.csv') :
# tunning_results = pd.read_csv(directory + '/lda_tuning_results.csv')
# else :
# tunning_results = LDA.tunning(texts, keyword_dct, corpus)
# tunning_results.to_csv(directory + '/lda_tuning_results.csv', index=False)
# # plotting tunned
# temp = tunning_results.groupby('Topics').mean()
# plt.plot(temp['U_mass'])
#%% test
# import matplotlib.pyplot as plt
# import numpy as np
# temp = embedding.get_sim_dist(encoded_CPC['G05B'],encoded_keyword)
# plt.hist(temp, bins=50)
# plt.axvline(np.percentile(temp, 90), color = 'red') # Q1
# plt.show()
|
the-stack_0_21838 | import os
import time
import random
import logging
import torch
import numpy as np
from glob import glob
from logging import Logger
from tqdm.auto import tqdm
from torch_geometric.data import Batch
class BlackHole(object):
def __setattr__(self, name, value):
pass
def __call__(self, *args, **kwargs):
return self
def __getattr__(self, name):
return self
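# Usage sketch (illustrative, not part of the original module): because both
# __getattr__ and __call__ return the BlackHole itself, it works as a no-op
# stand-in for a logger or summary writer, e.g.
#
#   writer = BlackHole()
#   writer.add_scalar('loss', 0.1)   # any attribute/call chain silently does nothing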
def get_logger(name, log_dir=None, log_fn='log.txt'):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s::%(name)s::%(levelname)s] %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
if log_dir is not None:
file_handler = logging.FileHandler(os.path.join(log_dir, log_fn))
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def get_new_log_dir(root='./logs', prefix='', tag=''):
fn = time.strftime('%Y_%m_%d__%H_%M_%S', time.localtime())
if prefix != '':
fn = prefix + '_' + fn
if tag != '':
fn = fn + '_' + tag
log_dir = os.path.join(root, fn)
os.makedirs(log_dir)
return log_dir
def seed_all(seed):
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
def inf_iterator(iterable):
iterator = iterable.__iter__()
while True:
try:
yield iterator.__next__()
except StopIteration:
iterator = iterable.__iter__()
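# Usage sketch (names are illustrative): wrap a DataLoader to draw batches
# forever, restarting it transparently whenever it is exhausted:
#
#   batches = inf_iterator(train_loader)
#   batch = next(batches)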
def log_hyperparams(writer, args):
from torch.utils.tensorboard.summary import hparams
vars_args = {k:v if isinstance(v, str) else repr(v) for k, v in vars(args).items()}
exp, ssi, sei = hparams(vars_args, {})
writer.file_writer.add_summary(exp)
writer.file_writer.add_summary(ssi)
writer.file_writer.add_summary(sei)
def int_tuple(argstr):
return tuple(map(int, argstr.split(',')))
def str_tuple(argstr):
return tuple(argstr.split(','))
def repeat_data(data, num_repeat):
datas = [data.clone() for i in range(num_repeat)]
return Batch.from_data_list(datas)
def repeat_batch(batch, num_repeat):
datas = batch.to_data_list()
new_data = []
for i in range(num_repeat):
        new_data += [d.clone() for d in datas]
return Batch.from_data_list(new_data)
def get_checkpoint_path(folder, it=None):
if it is not None:
return os.path.join(folder, '%d.pt' % it), it
all_iters = list(map(lambda x: int(os.path.basename(x[:-3])), glob(os.path.join(folder, '*.pt'))))
all_iters.sort()
return os.path.join(folder, '%d.pt' % all_iters[-1]), all_iters[-1]
|
the-stack_0_21839 | """Ideas for how fields could be defined, registered, and used."""
import warnings
class CachedField(object):
"""Custom property-like object.
A descriptor for caching field accessors.
Based on pd.core.accessor.CachedAccessor
Args:
        name (str): Namespace under which the field is accessed, e.g. ``act.foo``.
        field (cls): Class with the extension methods.
    Notes:
        The field class's __init__ method assumes ``Activity``
        as the single argument ``data``.
"""
def __init__(self, name, field):
self._name = name
self._field = field
def __get__(self, obj, cls):
if obj is None:
# we're accessing the attribute of the class, i.e., Activity.elevation
return self._field
field_obj = self._field(obj)
# Replace the property with the accessor object.
# This is what enables caching - next call to "get" will return
# the existing field_obj, rather than redo the read-in process.
# Inspired by: https://www.pydanny.com/cached-property.html
setattr(obj, self._name, field_obj)
return field_obj
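# Caching behaviour in brief (sketch; 'elevation' is an illustrative name):
# once a field class is registered under 'elevation', the first access to
# `act.elevation` builds `field(act)` and stores it on `act` via setattr, so
# subsequent accesses return the same object without re-running __init__.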
def register_field(name):
"""Register a custom accessor on Activity objects.
Based on :func:`pandas.api.extensions.register_dataframe_accessor`.
Args:
name (str): Name under which the accessor should be registered. A warning
is issued if this name conflicts with a preexisting attribute.
Returns:
callable: A class decorator.
See also:
:func:`pandas.api.extensions.register_dataframe_accessor`
Register a custom accessor on DataFrame objects.
`pandas.api.extensions._register_accessor() <https://github.com/pandas-dev/pandas/blob/v1.2.4/pandas/core/accessor.py#L189-L275>`_
Notes:
When accessed, your accessor will be initialized with the Activity object
the user is interacting with. So the signature must be
.. code-block:: python
def __init__(self, activity_obj): # noqa: E999
...
Examples:
In your library code::
import heartandsole as hns
@hns.api.extensions.register_field('running_smoothness')
class SmoothnessAccessor:
def __init__(self, activity_obj):
self._obj = activity_obj
@property
def avg(self):
# return the average of the records
return self._obj.records['running_smoothness'].mean()
Back in an interactive IPython session:
.. code-block:: ipython
            In [1]: act = hns.Activity(pd.DataFrame({'running_smoothness': np.linspace(0, 10)}))
In [2]: act.running_smoothness.avg
Out[2]: 5.0
TODO:
* Consider making this a classmethod of Activity.
"""
from heartandsole import Activity
def decorator(field):
if hasattr(Activity, name):
warnings.warn(
f"registration of accessor {repr(field)} under name "
f"{repr(name)} for type {repr(Activity)} is overriding a preexisting "
f"attribute with the same name.",
UserWarning,
stacklevel=2,
)
setattr(Activity, name, CachedField(name, field))
Activity._fields.add(name)
return field
return decorator
|
the-stack_0_21840 | #!/usr/bin/python
#
# Copyright (c) 2015 Iain Colledge for Adafruit Industries
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Python library for the TSL2561 digital luminosity (light) sensors.
This library is heavily based on the Arduino library for the TSL2561 digital
luminosity (light) sensors. It is basically a simple translation from C++ to
Python.
The thread on the Adafruit forum helped a lot to do this. Thanks to static,
huelke, pandring, adafruit_support_rick, scortier, bryand, csalty, lenos and
of course to Adafruit
Source for the Arduino library:
https://github.com/adafruit/TSL2561-Arduino-Library
Adafruit forum thread:
http://forums.adafruit.com/viewtopic.php?f=8&t=34922&sid=8336d566f2f03c25882aaf34c8a15a92
Original code posted here:
http://forums.adafruit.com/viewtopic.php?f=8&t=34922&start=75#p222877
This was checked against a 10 UKP lux meter from Amazon and was within 10% up
and down the range; the meter had a stated accuracy of 5%, but then again, it
was a 10 UKP meter.
Changelog:
1.2 - Additional clean-up - Chris Satterlee
Added underscore back into class name
Removed unnecessary inheritance from Adafruit_I2C
Removed vestigial trailing */ from comments
Removed (now unnecessary) autogain hack
Fold (most) long lines to comply with col 80 limit
Added BSD license header comment
1.1 - Fixes from
https://forums.adafruit.com/viewtopic.php?f=8&t=34922&p=430795#p430782
- Iain Colledge
Bug #1: The class name has the middle two digits transposed -
Adafruit_TSL2651 should be Adafruit_TSL2561
Bug #2: The read8 and read16 methods (functions) call the I2C readS8 and
readS16 methods respectively. They should call the readU8 and
readU16 (i.e. unsigned) methods.
Minor fixes and changes due to Pycharm and SonarQube recommendations, it
looks like Python more than C++ now
Added Exception thrown on sensor saturation
1.0 - Initial release - Iain Colledge
Removed commented out C++ code
Added calculate_avg_lux
Changed main method to use calculate_avg_lux and loop argument support
added.
Ported "Extended delays to take into account loose timing with 'delay'"
update from CPP code
Added hack so that with autogain every sample goes from 1x to 16x as going
from 16x to 1x does not work
"""
from __future__ import print_function
import logging
import sys
import time
from Adafruit_I2C import Adafruit_I2C
# Logging needs to be set at top after imports
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
class Adafruit_TSL2561(object):
TSL2561_VISIBLE = 2 # channel 0 - channel 1
TSL2561_INFRARED = 1 # channel 1
TSL2561_FULLSPECTRUM = 0 # channel 0
# I2C address options
TSL2561_ADDR_LOW = 0x29
TSL2561_ADDR_FLOAT = 0x39 # Default address (pin left floating)
TSL2561_ADDR_HIGH = 0x49
# Lux calculations differ slightly for CS package
TSL2561_PACKAGE_CS = 0
TSL2561_PACKAGE_T_FN_CL = 1
TSL2561_COMMAND_BIT = 0x80 # Must be 1
TSL2561_CLEAR_BIT = 0x40 # Clears any pending interrupt (write 1 to clear)
TSL2561_WORD_BIT = 0x20 # 1 = read/write word (rather than byte)
TSL2561_BLOCK_BIT = 0x10 # 1 = using block read/write
TSL2561_CONTROL_POWERON = 0x03
TSL2561_CONTROL_POWEROFF = 0x00
TSL2561_LUX_LUXSCALE = 14 # Scale by 2^14
TSL2561_LUX_RATIOSCALE = 9 # Scale ratio by 2^9
TSL2561_LUX_CHSCALE = 10 # Scale channel values by 2^10
TSL2561_LUX_CHSCALE_TINT0 = 0x7517 # 322/11 * 2^TSL2561_LUX_CHSCALE
TSL2561_LUX_CHSCALE_TINT1 = 0x0FE7 # 322/81 * 2^TSL2561_LUX_CHSCALE
# T, FN and CL package values
TSL2561_LUX_K1T = 0x0040 # 0.125 * 2^RATIO_SCALE
TSL2561_LUX_B1T = 0x01f2 # 0.0304 * 2^LUX_SCALE
TSL2561_LUX_M1T = 0x01be # 0.0272 * 2^LUX_SCALE
TSL2561_LUX_K2T = 0x0080 # 0.250 * 2^RATIO_SCALE
TSL2561_LUX_B2T = 0x0214 # 0.0325 * 2^LUX_SCALE
TSL2561_LUX_M2T = 0x02d1 # 0.0440 * 2^LUX_SCALE
TSL2561_LUX_K3T = 0x00c0 # 0.375 * 2^RATIO_SCALE
TSL2561_LUX_B3T = 0x023f # 0.0351 * 2^LUX_SCALE
TSL2561_LUX_M3T = 0x037b # 0.0544 * 2^LUX_SCALE
TSL2561_LUX_K4T = 0x0100 # 0.50 * 2^RATIO_SCALE
TSL2561_LUX_B4T = 0x0270 # 0.0381 * 2^LUX_SCALE
TSL2561_LUX_M4T = 0x03fe # 0.0624 * 2^LUX_SCALE
TSL2561_LUX_K5T = 0x0138 # 0.61 * 2^RATIO_SCALE
TSL2561_LUX_B5T = 0x016f # 0.0224 * 2^LUX_SCALE
TSL2561_LUX_M5T = 0x01fc # 0.0310 * 2^LUX_SCALE
TSL2561_LUX_K6T = 0x019a # 0.80 * 2^RATIO_SCALE
TSL2561_LUX_B6T = 0x00d2 # 0.0128 * 2^LUX_SCALE
TSL2561_LUX_M6T = 0x00fb # 0.0153 * 2^LUX_SCALE
TSL2561_LUX_K7T = 0x029a # 1.3 * 2^RATIO_SCALE
TSL2561_LUX_B7T = 0x0018 # 0.00146 * 2^LUX_SCALE
TSL2561_LUX_M7T = 0x0012 # 0.00112 * 2^LUX_SCALE
TSL2561_LUX_K8T = 0x029a # 1.3 * 2^RATIO_SCALE
TSL2561_LUX_B8T = 0x0000 # 0.000 * 2^LUX_SCALE
TSL2561_LUX_M8T = 0x0000 # 0.000 * 2^LUX_SCALE
# CS package values
TSL2561_LUX_K1C = 0x0043 # 0.130 * 2^RATIO_SCALE
TSL2561_LUX_B1C = 0x0204 # 0.0315 * 2^LUX_SCALE
TSL2561_LUX_M1C = 0x01ad # 0.0262 * 2^LUX_SCALE
TSL2561_LUX_K2C = 0x0085 # 0.260 * 2^RATIO_SCALE
TSL2561_LUX_B2C = 0x0228 # 0.0337 * 2^LUX_SCALE
TSL2561_LUX_M2C = 0x02c1 # 0.0430 * 2^LUX_SCALE
TSL2561_LUX_K3C = 0x00c8 # 0.390 * 2^RATIO_SCALE
TSL2561_LUX_B3C = 0x0253 # 0.0363 * 2^LUX_SCALE
TSL2561_LUX_M3C = 0x0363 # 0.0529 * 2^LUX_SCALE
TSL2561_LUX_K4C = 0x010a # 0.520 * 2^RATIO_SCALE
TSL2561_LUX_B4C = 0x0282 # 0.0392 * 2^LUX_SCALE
TSL2561_LUX_M4C = 0x03df # 0.0605 * 2^LUX_SCALE
TSL2561_LUX_K5C = 0x014d # 0.65 * 2^RATIO_SCALE
TSL2561_LUX_B5C = 0x0177 # 0.0229 * 2^LUX_SCALE
TSL2561_LUX_M5C = 0x01dd # 0.0291 * 2^LUX_SCALE
TSL2561_LUX_K6C = 0x019a # 0.80 * 2^RATIO_SCALE
TSL2561_LUX_B6C = 0x0101 # 0.0157 * 2^LUX_SCALE
TSL2561_LUX_M6C = 0x0127 # 0.0180 * 2^LUX_SCALE
TSL2561_LUX_K7C = 0x029a # 1.3 * 2^RATIO_SCALE
TSL2561_LUX_B7C = 0x0037 # 0.00338 * 2^LUX_SCALE
TSL2561_LUX_M7C = 0x002b # 0.00260 * 2^LUX_SCALE
TSL2561_LUX_K8C = 0x029a # 1.3 * 2^RATIO_SCALE
TSL2561_LUX_B8C = 0x0000 # 0.000 * 2^LUX_SCALE
TSL2561_LUX_M8C = 0x0000 # 0.000 * 2^LUX_SCALE
# Auto-gain thresholds
TSL2561_AGC_THI_13MS = 4850 # Max value at Ti 13ms = 5047
TSL2561_AGC_TLO_13MS = 100
TSL2561_AGC_THI_101MS = 36000 # Max value at Ti 101ms = 37177
TSL2561_AGC_TLO_101MS = 200
TSL2561_AGC_THI_402MS = 63000 # Max value at Ti 402ms = 65535
TSL2561_AGC_TLO_402MS = 500
# Clipping thresholds
TSL2561_CLIPPING_13MS = 4900
TSL2561_CLIPPING_101MS = 37000
TSL2561_CLIPPING_402MS = 65000
TSL2561_REGISTER_CONTROL = 0x00
TSL2561_REGISTER_TIMING = 0x01
TSL2561_REGISTER_THRESHHOLDL_LOW = 0x02
TSL2561_REGISTER_THRESHHOLDL_HIGH = 0x03
TSL2561_REGISTER_THRESHHOLDH_LOW = 0x04
TSL2561_REGISTER_THRESHHOLDH_HIGH = 0x05
TSL2561_REGISTER_INTERRUPT = 0x06
TSL2561_REGISTER_CRC = 0x08
TSL2561_REGISTER_ID = 0x0A
TSL2561_REGISTER_CHAN0_LOW = 0x0C
TSL2561_REGISTER_CHAN0_HIGH = 0x0D
TSL2561_REGISTER_CHAN1_LOW = 0x0E
TSL2561_REGISTER_CHAN1_HIGH = 0x0F
TSL2561_INTEGRATIONTIME_13MS = 0x00 # 13.7ms
TSL2561_INTEGRATIONTIME_101MS = 0x01 # 101ms
TSL2561_INTEGRATIONTIME_402MS = 0x02 # 402ms
TSL2561_DELAY_INTTIME_13MS = 0.015
TSL2561_DELAY_INTTIME_101MS = 0.120
TSL2561_DELAY_INTTIME_402MS = 0.450
TSL2561_GAIN_1X = 0x00 # No gain
TSL2561_GAIN_16X = 0x10 # 16x gain
TSL2561_NO_OF_AVG_SAMPLES = 25 # How many samples to make an average reading
def write8(self, reg, value):
"""
Writes a register and an 8 bit value over I2C
:param reg: Register / Address to write value to
:param value: Byte to write to Address
"""
logging.debug('write8')
self._i2c.write8(reg, value)
logging.debug('write8_end')
def read8(self, reg):
"""
Reads an 8 bit value over I2C
:param reg: Register / Address to read value from
:return: Unsigned byte
"""
logging.debug('read8')
return self._i2c.readU8(reg)
def read16(self, reg):
"""
Reads a 16 bit values over I2C
:param reg: Register / Address to read value from
:return: Unsigned word
"""
logging.debug('read16')
return self._i2c.readU16(reg)
def enable(self):
"""
Enables the device
"""
logging.debug('enable')
# Enable the device by setting the control bit to 0x03
self._i2c.write8(self.TSL2561_COMMAND_BIT |
self.TSL2561_REGISTER_CONTROL,
self.TSL2561_CONTROL_POWERON)
logging.debug('enable_end')
def disable(self):
"""
Disables the device (putting it in lower power sleep mode)
"""
logging.debug('disable')
# Turn the device off to save power
self._i2c.write8(self.TSL2561_COMMAND_BIT |
self.TSL2561_REGISTER_CONTROL,
self.TSL2561_CONTROL_POWEROFF)
logging.debug('disable_end')
def get_data(self):
"""
Private function to read luminosity on both channels
"""
logging.debug('get_data')
# Enables the device by setting the control bit to 0x03
self.enable()
# Wait x ms for ADC to complete
if self._tsl2561IntegrationTime == self.TSL2561_INTEGRATIONTIME_13MS:
time.sleep(self.TSL2561_DELAY_INTTIME_13MS)
elif self._tsl2561IntegrationTime == self.TSL2561_INTEGRATIONTIME_101MS:
time.sleep(self.TSL2561_DELAY_INTTIME_101MS)
else:
time.sleep(self.TSL2561_DELAY_INTTIME_402MS)
# Reads a two byte value from channel 0 (visible + infrared)
# noinspection PyPep8
self._broadband = self.read16(self.TSL2561_COMMAND_BIT |
self.TSL2561_WORD_BIT |
self.TSL2561_REGISTER_CHAN0_LOW)
# Reads a two byte value from channel 1 (infrared)
self._ir = self.read16(self.TSL2561_COMMAND_BIT |
self.TSL2561_WORD_BIT |
self.TSL2561_REGISTER_CHAN1_LOW)
# Turn the device off to save power
self.disable()
logging.debug('getData_end"')
# noinspection PyMissingConstructor
def __init__(self, address=TSL2561_ADDR_FLOAT, debug=False):
"""
Constructor
:param address: I2C address of TSL2561, defaults to 0x39
:param debug: Turn on debugging, defaults to False
"""
self._debug = debug
logging.debug('__init__"')
self._address = address
self._tsl2561Initialised = False
self._tsl2561AutoGain = False
self._tsl2561IntegrationTime = self.TSL2561_INTEGRATIONTIME_13MS
self._tsl2561Gain = self.TSL2561_GAIN_1X
self._i2c = Adafruit_I2C(self._address)
self._luminosity = 0
self._broadband = 0
self._ir = 0
logging.debug('__init___end')
def begin(self):
"""
Initializes I2C and configures the sensor (call this function before
doing anything else)
Note: by default, the device is in power down mode on bootup
:return: True if connected to a TSL2561
"""
logging.debug('begin')
# Make sure we're actually connected
x = self.read8(self.TSL2561_REGISTER_ID)
if not(x & 0x0A):
return False
self._tsl2561Initialised = True
# Set default integration time and gain
self.set_integration_time(self._tsl2561IntegrationTime)
self.set_gain(self._tsl2561Gain)
# Note: by default, the device is in power down mode on bootup
self.disable()
logging.debug('begin_end')
return True
def enable_auto_gain(self, enable):
"""
Enables or disables the auto-gain settings when reading
data from the sensor
:param enable: True to enable
"""
logging.debug('enable_auto_gain')
if enable:
self._tsl2561AutoGain = enable
else:
self._tsl2561AutoGain = False
logging.debug('enableAutoGain_end')
def set_integration_time(self, integration_time):
"""
        Sets the integration time for the TSL2561
:param integration_time:
:return:
"""
logging.debug('set_integration_time')
if not self._tsl2561Initialised:
self.begin()
# Enable the device by setting the control bit to 0x03
self.enable()
# Update the timing register
self.write8(self.TSL2561_COMMAND_BIT |
self.TSL2561_REGISTER_TIMING, integration_time |
self._tsl2561Gain)
# Update value placeholders
self._tsl2561IntegrationTime = integration_time
# Turn the device off to save power
self.disable()
logging.debug('setIntegrationTime_end')
def set_gain(self, gain):
"""
Adjusts the gain on the TSL2561 (adjusts the sensitivity to light)
:param gain:
"""
logging.debug('set_gain')
if not self._tsl2561Initialised:
self.begin()
# Enable the device by setting the control bit to 0x03
self.enable()
# Update the timing register
self.write8(self.TSL2561_COMMAND_BIT |
self.TSL2561_REGISTER_TIMING,
self._tsl2561IntegrationTime | gain)
# Update value placeholders
self._tsl2561Gain = gain
# Turn the device off to save power
self.disable()
logging.debug('setGain_end')
def get_luminosity(self):
"""
Gets the broadband (mixed lighting) and IR only values from
the TSL2561, adjusting gain if auto-gain is enabled
"""
logging.debug('get_luminosity')
valid = False
if not self._tsl2561Initialised:
self.begin()
# If Auto gain disabled get a single reading and continue
if not self._tsl2561AutoGain:
self.get_data()
return
# Read data until we find a valid range
agc_check = False
while not valid:
_it = self._tsl2561IntegrationTime
# Get the hi/low threshold for the current integration time
if _it==self.TSL2561_INTEGRATIONTIME_13MS:
_hi = self.TSL2561_AGC_THI_13MS
_lo = self.TSL2561_AGC_TLO_13MS
elif _it==self.TSL2561_INTEGRATIONTIME_101MS:
_hi = self.TSL2561_AGC_THI_101MS
_lo = self.TSL2561_AGC_TLO_101MS
else:
_hi = self.TSL2561_AGC_THI_402MS
_lo = self.TSL2561_AGC_TLO_402MS
self.get_data()
# Run an auto-gain check if we haven't already done so ...
if not agc_check:
if (self._broadband < _lo) and \
(self._tsl2561Gain == self.TSL2561_GAIN_1X):
# Increase the gain and try again
self.set_gain(self.TSL2561_GAIN_16X)
# Drop the previous conversion results
self.get_data()
# Set a flag to indicate we've adjusted the gain
agc_check = True
elif (self._broadband > _hi) and \
(self._tsl2561Gain == self.TSL2561_GAIN_16X):
# Drop gain to 1x and try again
self.set_gain(self.TSL2561_GAIN_1X)
# Drop the previous conversion results
self.get_data()
# Set a flag to indicate we've adjusted the gain
agc_check = True
else:
# Nothing to look at here, keep moving ....
# Reading is either valid, or we're already at the chip's
# limits
valid = True
else:
# If we've already adjusted the gain once, just return the new
# results. This avoids endless loops where a value is at one
# extreme pre-gain, and the the other extreme post-gain
valid = True
logging.debug('getLuminosity_end')
def calculate_lux(self):
"""
Converts the raw sensor values to the standard SI lux equivalent.
Returns 0 if the sensor is saturated and the values are unreliable.
:return: lux value, unsigned 16bit word (0 - 65535)
:raises: OverflowError when TSL2561 sensor is saturated
"""
logging.debug('calculate_lux')
self.get_luminosity()
# Make sure the sensor isn't saturated!
if self._tsl2561IntegrationTime == self.TSL2561_INTEGRATIONTIME_13MS:
clip_threshold = self.TSL2561_CLIPPING_13MS
elif self._tsl2561IntegrationTime == self.TSL2561_INTEGRATIONTIME_101MS:
clip_threshold = self.TSL2561_CLIPPING_101MS
else:
clip_threshold = self.TSL2561_CLIPPING_402MS
# Raise exception if either or both sensor channels are saturated
if (self._broadband > clip_threshold) and (self._ir > clip_threshold):
raise OverflowError('TSL2561 Sensor Saturated (both channels)')
elif (self._broadband > clip_threshold):
raise OverflowError('TSL2561 Sensor Saturated (broadband channel)')
elif (self._ir > clip_threshold):
raise OverflowError('TSL2561 Sensor Saturated (IR channel)')
# Get the correct scale depending on the integration time
if self._tsl2561IntegrationTime ==self.TSL2561_INTEGRATIONTIME_13MS:
ch_scale = self.TSL2561_LUX_CHSCALE_TINT0
elif self._tsl2561IntegrationTime ==self.TSL2561_INTEGRATIONTIME_101MS:
ch_scale = self.TSL2561_LUX_CHSCALE_TINT1
else:
ch_scale = 1 << self.TSL2561_LUX_CHSCALE
# Scale for gain (1x or 16x)
if not self._tsl2561Gain:
ch_scale <<= 4
# Scale the channel values
channel0 = (self._broadband * ch_scale) >> self.TSL2561_LUX_CHSCALE
channel1 = (self._ir * ch_scale) >> self.TSL2561_LUX_CHSCALE
# Find the ratio of the channel values (Channel1/Channel0)
ratio1 = 0
if channel0 != 0:
ratio1 = (channel1 << (self.TSL2561_LUX_RATIOSCALE + 1)) / channel0
# round the ratio value
ratio = (ratio1 + 1) >> 1
if self.TSL2561_PACKAGE_CS == 1:
if (ratio >= 0) and (ratio <= self.TSL2561_LUX_K1C):
b=self.TSL2561_LUX_B1C
m=self.TSL2561_LUX_M1C
elif ratio <= self.TSL2561_LUX_K2C:
b=self.TSL2561_LUX_B2C
m=self.TSL2561_LUX_M2C
elif ratio <= self.TSL2561_LUX_K3C:
b=self.TSL2561_LUX_B3C
m=self.TSL2561_LUX_M3C
elif ratio <= self.TSL2561_LUX_K4C:
b=self.TSL2561_LUX_B4C
m=self.TSL2561_LUX_M4C
elif ratio <= self.TSL2561_LUX_K5C:
b=self.TSL2561_LUX_B5C
m=self.TSL2561_LUX_M5C
elif ratio <= self.TSL2561_LUX_K6C:
b=self.TSL2561_LUX_B6C
m=self.TSL2561_LUX_M6C
elif ratio <= self.TSL2561_LUX_K7C:
b=self.TSL2561_LUX_B7C
m=self.TSL2561_LUX_M7C
elif ratio > self.TSL2561_LUX_K8C:
b=self.TSL2561_LUX_B8C
m=self.TSL2561_LUX_M8C
elif self.TSL2561_PACKAGE_T_FN_CL == 1:
if (ratio >= 0) and (ratio <= self.TSL2561_LUX_K1T):
b=self.TSL2561_LUX_B1T
m=self.TSL2561_LUX_M1T
elif ratio <= self.TSL2561_LUX_K2T:
b=self.TSL2561_LUX_B2T
m=self.TSL2561_LUX_M2T
elif ratio <= self.TSL2561_LUX_K3T:
b=self.TSL2561_LUX_B3T
m=self.TSL2561_LUX_M3T
elif ratio <= self.TSL2561_LUX_K4T:
b=self.TSL2561_LUX_B4T
m=self.TSL2561_LUX_M4T
elif ratio <= self.TSL2561_LUX_K5T:
b=self.TSL2561_LUX_B5T
m=self.TSL2561_LUX_M5T
elif ratio <= self.TSL2561_LUX_K6T:
b=self.TSL2561_LUX_B6T
m=self.TSL2561_LUX_M6T
elif ratio <= self.TSL2561_LUX_K7T:
b=self.TSL2561_LUX_B7T
m=self.TSL2561_LUX_M7T
elif ratio > self.TSL2561_LUX_K8T:
b=self.TSL2561_LUX_B8T
m=self.TSL2561_LUX_M8T
# endif
# noinspection PyUnboundLocalVariable,PyUnboundLocalVariable
temp = (channel0 * b) - (channel1 * m)
# Do not allow negative lux value
if temp < 0:
temp = 0
# Round lsb (2^(LUX_SCALE-1))
temp += 1 << (self.TSL2561_LUX_LUXSCALE - 1)
# Strip off fractional portion
lux = temp >> self.TSL2561_LUX_LUXSCALE
# Signal I2C had no errors
logging.debug('calculateLux_end')
return lux
def calculate_avg_lux(self, testavg=TSL2561_NO_OF_AVG_SAMPLES):
"""
Calculates an averaged Lux value, useful for flickering lights and for
smoothing values due to noise
:param testavg: Number of samples to take in a reading, defaults to 25
:return: lux value, unsigned 16bit word (0 - 65535)
"""
# Set initial vars
count = 0
luxavgtotal = 0
# Create a cumulative total of values for 'testavg' tests
while True:
capture = self.calculate_lux()
luxavgtotal += capture
count += 1
# Once we reach the number of required tests, work out the average
if count >= testavg:
luxavg = round(luxavgtotal / testavg)
return luxavg
if __name__ == "__main__":
LightSensor = Adafruit_TSL2561()
LightSensor.enable_auto_gain(True)
# See if "loop" has been passed as an arg.
try:
arg = sys.argv[1]
if arg == "loop":
while True:
try:
print(int(LightSensor.calculate_avg_lux()))
except OverflowError as e:
print(e)
except KeyboardInterrupt:
quit()
else:
print("Invalid arg(s):", sys.argv[1])
except IndexError:
print(int(LightSensor.calculate_avg_lux()))
|
the-stack_0_21842 | # Load model and tokenizer
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from transformers import pipeline
import torch
import numpy as np
class BertGradient:
def __init__(self):
model_name = "textattack/distilbert-base-uncased-imdb"
self.model = AutoModelForSequenceClassification.from_pretrained(model_name, cache_dir='./data').cuda()
self.tokenizer = AutoTokenizer.from_pretrained(model_name, add_prefix_space=True)
self.embeddings = None
self.embeddings_gradients = None
handle = self._register_embedding_list_hook()
hook = self._register_embedding_gradient_hooks()
def _register_embedding_list_hook(self):
def forward_hook(module, inputs, output):
self.embeddings = output.squeeze(0).clone().cpu().detach().numpy()
embedding_layer = self.model.get_input_embeddings()
handle = embedding_layer.register_forward_hook(forward_hook)
return handle
def _register_embedding_gradient_hooks(self):
def hook_layers(module, grad_in, grad_out):
self.embeddings_gradients = grad_out[0].cpu().numpy()
embedding_layer = self.model.get_input_embeddings()
hook = embedding_layer.register_backward_hook(hook_layers)
return hook
def embedding(self, inp):
x = self.tokenizer(inp, is_split_into_words=True, return_tensors='pt',
padding='max_length', truncation=True, max_length=45)
x = self.assign_gpu(x)
return self.model.get_input_embeddings()(x['input_ids']).detach().flatten(start_dim=1).cpu().numpy()
def bbsds(self, inp, batch_size=128):
x = self.tokenizer(inp, is_split_into_words=True, return_tensors='pt',
padding='max_length', truncation=True, max_length=45)
dataset = torch.utils.data.TensorDataset(x['input_ids'])
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
all_out = np.zeros((0,2))
for b in dataloader:
batch = b[0].cuda()
out = torch.softmax(self.model(batch).logits, dim=-1).detach().cpu().numpy()
all_out = np.concatenate((all_out, out), axis=0)
return all_out
def assign_gpu(self, x):
input_ids = x['input_ids'].to('cuda:0')
attention_mask = x['attention_mask'].to('cuda:0')
output = {'input_ids': input_ids,
'attention_mask': attention_mask}
return output
def grad_x_input(self, inp, batch_size=128):
x = self.tokenizer(inp, is_split_into_words=True, return_tensors='pt',
padding='max_length', truncation=True, max_length=45)
dataset = torch.utils.data.TensorDataset(x['input_ids'])
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
all_embeds = np.zeros((0,34560))
for b in dataloader:
batch = b[0].cuda()
self.embeddings_list = []
self.embeddings_gradients = []
output = torch.softmax(self.model(batch).logits, dim=-1)
ind = output.data.max(1)[1]
probvalue = 1.0
grad_out = output.data.clone()
grad_out.fill_(0.0)
grad_out.scatter_(1, ind.unsqueeze(0).t(), probvalue)
self.model.zero_grad()
output.backward(grad_out)
gxi = (self.embeddings_gradients * self.embeddings).reshape(self.embeddings.shape[0],-1)
all_embeds = np.concatenate((all_embeds, gxi),axis=0)
return all_embeds
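# Usage sketch (illustrative; expects pre-tokenised input because the tokenizer
# above is called with is_split_into_words=True, and assumes a CUDA device):
#
#   bg = BertGradient()
#   probs = bg.bbsds([["a", "great", "movie"]])            # softmax class scores
#   saliency = bg.grad_x_input([["a", "great", "movie"]])  # gradient x input per example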
|
the-stack_0_21844 | #/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the terms found in the LICENSE file in the root of this source tree.
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * [email protected]
# */
#---------------------------------------------------------------------
import os
import re
import sys
import subprocess
class verifySanityCheckDeployment():
def __init__(self):
        self.job_name = ''
        self.job_id = ''
def checkLogs(self):
hss_status = self.analyze_check_run_log('HSS')
mme_status = self.analyze_check_run_log('MME')
if not hss_status:
print ('HSS did not deploy properly')
if not mme_status:
print ('MME did not deploy properly')
if not hss_status or not mme_status:
sys.exit('Sanity Check Deployment went wrong')
else:
print ('Sanity Check Deployment is OK')
def analyze_check_run_log(self, nfType):
logFileName = nfType.lower() + '_check_run.log'
cwd = os.getcwd()
status = False
if os.path.isfile(cwd + '/archives/' + logFileName):
myCmd = 'iconv -f ISO-8859-1 -t UTF-8//TRANSLIT ' + cwd + '/archives/' + logFileName + ' -o ' + cwd + '/archives/' + logFileName + '.conv'
subprocess.run(myCmd, shell=True)
myCmd = 'mv ' + cwd + '/archives/' + logFileName + '.conv ' + cwd + '/archives/' + logFileName
subprocess.run(myCmd, shell=True)
nb_opc_generation = 0
freeDiameterUp = False
connectionWithMME = False
connectionWithHSS = False
sctp_status = False
with open(cwd + '/archives/' + logFileName, 'r') as logfile:
for line in logfile:
result = re.search('Compute opc', line)
if result is not None:
nb_opc_generation += 1
result = re.search('The freeDiameter engine has been started|Diameter identity of MME', line)
if result is not None:
freeDiameterUp = True
result = re.search('STATE_OPEN.*mme', line)
if result is not None:
connectionWithMME = True
result = re.search('Peer hss.* is now connected', line)
if result is not None:
connectionWithHSS = True
result = re.search('Received SCTP_INIT_MSG', line)
if result is not None:
sctp_status = True
logfile.close()
if nfType == 'HSS':
if nb_opc_generation > 0 and freeDiameterUp and connectionWithMME:
status = True
if nfType == 'MME':
if freeDiameterUp and connectionWithHSS and sctp_status:
status = True
return status
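# Note (editorial sketch): checkLogs() above expects the deployment logs to be
# present as ./archives/hss_check_run.log and ./archives/mme_check_run.log
# relative to the working directory, as assembled in analyze_check_run_log().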
def Usage():
print('----------------------------------------------------------------------------------------------------------------------')
print('verifySanityCheckDeployment.py')
print(' Verify the Sanity Check Deployment in the pipeline.')
print('----------------------------------------------------------------------------------------------------------------------')
print('Usage: python3 verifySanityCheckDeployment.py [options]')
print(' --help Show this help.')
print('---------------------------------------------------------------------------------------------- Mandatory Options -----')
print(' --job_name=[Jenkins Job name]')
print(' --job_id=[Jenkins Job Build ID]')
#--------------------------------------------------------------------------------------------------------
#
# Start of main
#
#--------------------------------------------------------------------------------------------------------
argvs = sys.argv
argc = len(argvs)
vscd = verifySanityCheckDeployment()
while len(argvs) > 1:
myArgv = argvs.pop(1)
if re.match('^\-\-help$', myArgv, re.IGNORECASE):
Usage()
sys.exit(0)
elif re.match('^\-\-job_name=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-job_name=(.+)$', myArgv, re.IGNORECASE)
vscd.job_name = matchReg.group(1)
elif re.match('^\-\-job_id=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-job_id=(.+)$', myArgv, re.IGNORECASE)
vscd.job_id = matchReg.group(1)
else:
sys.exit('Invalid Parameter: ' + myArgv)
if vscd.job_name == '' or vscd.job_id == '':
sys.exit('Missing Parameter in job description')
vscd.checkLogs()
|
the-stack_0_21845 | disp_avlbl = True
import os
if 'DISPLAY' not in os.environ:
disp_avlbl = False
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import scipy.io as sio
import scipy.sparse as sp
import scipy.sparse.linalg as lg
from time import time
import sys
sys.path.append('./')
sys.path.append(os.path.realpath(__file__))
from .static_graph_embedding import StaticGraphEmbedding
from gem.utils import graph_util, plot_util
from gem.evaluation import visualize_embedding as viz
class LaplacianEigenmaps(StaticGraphEmbedding):
def __init__(self, *hyper_dict, **kwargs):
''' Initialize the LaplacianEigenmaps class
Args:
d: dimension of the embedding
'''
hyper_params = {
'method_name': 'lap_eigmap_svd'
}
hyper_params.update(kwargs)
for key in hyper_params.keys():
self.__setattr__('_%s' % key, hyper_params[key])
for dictionary in hyper_dict:
for key in dictionary:
self.__setattr__('_%s' % key, dictionary[key])
def get_method_name(self):
return self._method_name
def get_method_summary(self):
return '%s_%d' % (self._method_name, self._d)
def learn_embedding(self, graph=None, edge_f=None,
is_weighted=False, no_python=False):
if not graph and not edge_f:
raise Exception('graph/edge_f needed')
if not graph:
graph = graph_util.loadGraphFromEdgeListTxt(edge_f)
graph = graph.to_undirected()
t1 = time()
L_sym = nx.normalized_laplacian_matrix(graph)
w, v = lg.eigs(L_sym, k=self._d + 1, which='SM')
idx = np.argsort(w) # sort eigenvalues
w = w[idx]
v = v[:, idx]
t2 = time()
self._X = v[:, 1:]
p_d_p_t = np.dot(v, np.dot(np.diag(w), v.T))
eig_err = np.linalg.norm(p_d_p_t - L_sym)
print('Laplacian matrix recon. error (low rank): %f' % eig_err)
return self._X.real, (t2 - t1)
def get_embedding(self):
return self._X
def get_edge_weight(self, i, j):
return np.exp(
-np.power(np.linalg.norm(self._X[i, :] - self._X[j, :]), 2)
)
def get_reconstructed_adj(self, X=None, node_l=None):
if X is not None:
node_num = X.shape[0]
self._X = X
else:
node_num = self._node_num
adj_mtx_r = np.zeros((node_num, node_num))
for v_i in range(node_num):
for v_j in range(node_num):
if v_i == v_j:
continue
adj_mtx_r[v_i, v_j] = self.get_edge_weight(v_i, v_j)
return adj_mtx_r
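# Note (editorial sketch): learn_embedding() keeps the d+1 smallest eigenpairs
# of the symmetric normalized Laplacian and discards the first one (the trivial
# zero-eigenvalue direction), leaving a d-dimensional embedding per node;
# get_edge_weight() then reconstructs weights with a Gaussian kernel over
# embedding distances.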
if __name__ == '__main__':
# load Zachary's Karate graph
edge_f = 'data/karate.edgelist'
G = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=False)
G = G.to_directed()
res_pre = 'results/testKarate'
graph_util.print_graph_stats(G)
t1 = time()
embedding = LaplacianEigenmaps(2)
embedding.learn_embedding(graph=G, edge_f=None,
is_weighted=True, no_python=True)
print('Laplacian Eigenmaps:\n\tTraining time: %f' % (time() - t1))
viz.plot_embedding2D(embedding.get_embedding(),
di_graph=G, node_colors=None)
plt.show()
|
the-stack_0_21846 | # -*- coding: utf-8 -*-
import os
import re
import sys
import string
import random
import secp256k1
from secp256k1 import PrivateKey
from Crypto.Hash import keccak as keccaklib
from ethereum.utils import sha3
from ethereum.utils import remove_0x_head
import raiden
__all__ = (
'sha3',
'keccak_256',
'keccak',
'ishash',
'isaddress',
'make_address',
'make_privkey_address',
'publickey_to_address',
'privatekey_to_address',
'pex',
'lpex',
'get_contract_path',
'safe_lstrip_hex',
'camel_to_snake_case'
)
LETTERS = string.printable
# From the secp256k1 header file:
#
# The purpose of context structures is to cache large precomputed data tables
# that are expensive to construct, and also to maintain the randomization data
# for blinding.
#
# Do not create a new context object for each operation, as construction is
# far slower than all other API calls (~100 times slower than an ECDSA
# verification).
#
# A constructed context can safely be used from multiple threads
# simultaneously, but API call that take a non-const pointer to a context
# need exclusive access to it. In particular this is the case for
# secp256k1_context_destroy and secp256k1_context_randomize.
#
# Regarding randomization, either do it once at creation time (in which case
# you do not need any locking for the other calls), or use a read-write lock.
#
GLOBAL_CTX = secp256k1.lib.secp256k1_context_create(secp256k1.ALL_FLAGS)
def safe_address_decode(address):
try:
address = address.decode('hex')
except TypeError:
pass
return address
def keccak_256(data):
return keccaklib.new(digest_bits=256, data=data)
def keccak(seed):
return keccak_256(seed).digest()
def ishash(data):
return isinstance(data, (bytes, bytearray)) and len(data) == 32
def isaddress(data):
return isinstance(data, (bytes, bytearray)) and len(data) == 20
def make_address():
return bytes(''.join(random.choice(LETTERS) for _ in range(20)))
def make_privkey_address():
private_key_bin = sha3(''.join(random.choice(LETTERS) for _ in range(20)))
privkey = PrivateKey(
private_key_bin,
ctx=GLOBAL_CTX,
raw=True,
)
pubkey = privkey.pubkey.serialize(compressed=False)
address = publickey_to_address(pubkey)
return privkey, address
def pex(data):
return str(data).encode('hex')[:8]
def lpex(lst):
return [pex(l) for l in lst]
def activate_ultratb():
from IPython.core import ultratb
sys.excepthook = ultratb.VerboseTB(call_pdb=True, tb_offset=6)
def host_port_to_endpoint(host, port):
return "{}:{}".format(host, port)
def split_endpoint(endpoint):
host, port = endpoint.split(':')[:2]
port = int(port)
return (host, port)
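# The two helpers below derive an Ethereum-style address: drop the 0x04 prefix
# of the uncompressed 65-byte public key, keccak/sha3 the remaining 64 bytes,
# and keep the last 20 bytes of the digest.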
def publickey_to_address(publickey):
return sha3(publickey[1:])[12:]
def privatekey_to_address(private_key_bin):
private_key = PrivateKey(
private_key_bin,
ctx=GLOBAL_CTX,
raw=True,
)
pubkey = private_key.pubkey.serialize(compressed=False)
return publickey_to_address(pubkey)
def get_project_root():
return os.path.dirname(raiden.__file__)
def get_contract_path(contract_name):
contract_path = os.path.join(
get_project_root(),
'smart_contracts',
contract_name
)
return os.path.realpath(contract_path)
def safe_lstrip_hex(val):
if isinstance(val, basestring):
return remove_0x_head(val)
return val
def get_encoded_transfers(their_transfer, our_transfer):
"""Check for input sanity and return the encoded version of the transfers"""
if not their_transfer and our_transfer:
raise ValueError(
"There is no reason to provide our_transfer when their_transfer"
" is not provided"
)
their_encoded = their_transfer.encode() if their_transfer else ""
our_encoded = our_transfer.encode() if our_transfer else ""
return their_encoded, our_encoded
def camel_to_snake_case(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def snake_to_camel_case(snake_string):
return snake_string.title().replace("_", "")
def channel_to_api_dict(channel):
"""Takes in a Channel Object and turns it into a dictionary for
usage in the REST API. Decoding from binary to hex happens through
the marshmallow AddressField in encoding.py.
"""
return {
"channel_address": channel.channel_address,
"token_address": channel.token_address,
"partner_address": channel.partner_address,
"settle_timeout": channel.settle_timeout,
"balance": channel.contract_balance,
"state": channel.state
}
|
the-stack_0_21847 | #!/usr/bin/env python3
# coding=utf-8
#
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from core.constants import ToolCommandType
from core.utils import get_file_list
from core.utils import get_file_list_by_postfix
from core.utils import get_build_output_path
from core.utils import scan_support_product
from core.config.config_manager import UserConfigManager
from core.config.config_manager import FrameworkConfigManager
from core.config.parse_parts_config import ParsePartsConfig
CMD_KEY_PRODUCTLIST = "productlist"
CMD_KEY_TYPELIST = "typelist"
CMD_KEY_SUBSYSTEMLIST = "subsystemlist"
CMD_KEY_PARTLIST = "partlist"
CMD_KEY_MODULELIST = "modulelist"
TOOL_VERSION_INFO = """Welcome to DeveloperTest V1.0.0.
"""
HELP_COMMAND_INFORMATION = """use help [follow command] for more information:
""" + \
"show: " + """Display a list of supported show commands.
""" + \
"run: " + """Display a list of supported run commands.
""" + \
"list: " + """Display a list of supported devices.
""" + \
"quit: " + """Exit the test framework application.
"""
SUPPORT_COMMAND_SHOW = """use show [follow command] for more information:
""" + \
"productlist" + """
""" + \
"typelist" + """
""" + \
"subsystemlist" + """
""" + \
"partlist" + """
""" + \
"modulelist" + """
"""
RUNCASES_INFORMATION = """run:
This command is used to execute the selected testcases.
It includes a series of processes such as use case compilation, \
execution, and result collection.
usage: run [-p PRODUCTFORM]
[-t [TESTTYPE [TESTTYPE ...]]]
[-ss [SUBSYSTEM [SUBSYSTEM ...]]]
[-tp [TESTPART [TESTPART ...]]]
[-tm TESTMODULE]
[-ts TESTSUIT]
[-tc TESTCASE]
[-tl TESTLEVEL]
optional arguments:
-p PRODUCTFORM, --productform PRODUCTFORM
Specified product form
-t [TESTTYPE [TESTTYPE ...]], --testtype [TESTTYPE [TESTTYPE ...]]
Specify test type(UT,MST,ST,PERF,ALL)
-ss [SUBSYSTEM [SUBSYSTEM ...]], --subsystem [SUBSYSTEM [SUBSYSTEM ...]]
Specify test subsystem
-tp [TESTPART [TESTPART ...]], --testpart [TESTPART [TESTPART ...]]
Specify test testpart
-tm TESTMODULE, --testmodule TESTMODULE
Specified test module
-ts TESTSUIT, --testsuit TESTSUIT
Specify test suit
-tc TESTCASE, --testcase TESTCASE
Specify test case
-tl TESTLEVEL, --testlevel TESTLEVEL
Examples:
run -t UT
run -t UT -ss aafwk
run -t UT -ss aafwk -tm base_test
run -t UT -ss aafwk -tm base_test -ts base_test
run -t UT -ss aafwk -tm base_test -ts base_test -tl 2
run -t UT -ss aafwk -tm base_test -ts base_test -tc \
AAFwkBaseTest.*
run -t UT -ss aafwk -tm base_test -ts base_test -tc \
AAFwkBaseTest.object_test_001
run -t MST
...
run -t ALL
...
"""
LIST_INFORMATION = "list\n" + """
This command is used to display device list.
"""
QUIT_INFORMATION = "quit\n" + """
This command is used to exit the test framework application.
"""
#############################################################################
#############################################################################
def select_user_input(data_list):
data_item_count = len(data_list)
select_item_value = ""
select_item_index = -1
if len(data_list) != 0:
count = 0
while True:
input_data = input("")
if "" != input_data and input_data.isdigit():
input_num = int(input_data)
if input_num > 0 and (input_num <= data_item_count):
select_item_index = input_num - 1
select_item_value = data_list[input_num - 1]
break
else:
print("The data you entered is out of range, \
please re-enter:")
count += 1
else:
if "" == input_data:
select_item_index = 0
select_item_value = data_list[0]
break
else:
print("You entered a non-numeric character, \
please re-enter:")
count += 1
if count >= 3:
print("You entered the error three times in a row, \
exit the frame.")
quit()
sys.exit(0)
return select_item_value, select_item_index
def select_productform():
select_value = "phone"
scan_product_list = scan_support_product()
config_product_list = \
FrameworkConfigManager().get_framework_config("productform")
productform_list = scan_product_list + config_product_list
if len(productform_list) != 0:
print("Please select the current tested product form:")
for index, element in enumerate(productform_list):
print("%d. %s" % (index + 1, element))
print("default is [1] %s" % productform_list[0])
select_value, _ = select_user_input(productform_list)
print(select_value)
return select_value
def show_wizard_mode():
wizard_data_dic = {}
print("+++++++++++++++++++++++++++++++++++++++++++++")
productform = select_productform()
if productform == "":
productform = "phone"
wizard_data_dic["productform"] = productform
print("+++++++++++++++++++++++++++++++++++++++++++++")
print("The environment is ready, please use the run command to test.")
return wizard_data_dic
#############################################################################
#############################################################################
def display_help_info(para_list):
if len(para_list) == 0 or para_list[0] != ToolCommandType.TOOLCMD_KEY_HELP:
print("This command is not support.")
return
if len(para_list) > 1:
display_help_command_info(para_list[1])
else:
print(TOOL_VERSION_INFO)
        print(HELP_COMMAND_INFORMATION)
def display_show_info(para_list, productform):
if len(para_list) == 0 or para_list[0] != ToolCommandType.TOOLCMD_KEY_SHOW:
print("This command is not support.")
return
if len(para_list) > 1:
display_show_command_info(para_list[1], productform)
else:
print(SUPPORT_COMMAND_SHOW)
#############################################################################
#############################################################################
def get_module_list_from_output_dir(product_form):
module_path_list = []
all_product_list = scan_support_product()
if product_form in all_product_list:
module_list_file_path = os.path.join(
get_build_output_path(product_form),
"module_list_files")
else:
module_list_file_path = os.path.join(
get_build_output_path(product_form),
"test_info",
"module_list_files")
print(module_list_file_path)
if os.path.exists(module_list_file_path):
file_list = get_file_list_by_postfix(module_list_file_path, ".mlf")
for file in file_list:
module_path = \
file[len(module_list_file_path) + 1: file.rfind(os.sep)]
if module_path != "" and module_path not in module_path_list:
module_path_list.append(module_path)
else:
print("%s does not exist." % module_list_file_path)
module_path_list.sort()
return module_path_list
def get_module_list_from_case_dir(test_case_dir):
file_list = []
test_case_tests_path = test_case_dir
if not os.path.exists(test_case_tests_path):
return file_list
for test_type in os.listdir(test_case_tests_path):
file_path = os.path.join(test_case_tests_path, test_type)
for dirs in os.walk(file_path):
files = get_file_list(find_path=dirs[0])
for file_name in files:
if "" != file_name and -1 == file_name.find(__file__):
file_name = os.path.join(dirs[0], file_name)
if os.path.isfile(file_name):
file_name = file_name[len(file_path) + 1: \
file_name.rfind(os.sep)]
file_list.append(file_name)
return file_list
def get_module_list(product_form):
module_path_list = []
testcase_dir = UserConfigManager().get_test_cases_dir()
if testcase_dir == "":
module_path_list = get_module_list_from_output_dir(product_form)
else:
module_path_list = get_module_list_from_case_dir(testcase_dir)
return module_path_list
#############################################################################
#############################################################################
def show_product_list():
print("List of currently supported productform:")
scan_product_list = scan_support_product()
config_product_list = \
FrameworkConfigManager().get_framework_config("productform")
productform_list = scan_product_list + config_product_list
if 0 != len(productform_list):
for index, element in enumerate(productform_list):
print(" %d. %s" % (index + 1, element))
else:
print("No category specified.")
def show_testtype_list():
print("List of currently supported test types:")
testtype_list = FrameworkConfigManager().get_framework_config(
"test_category")
if 0 != len(testtype_list):
for index, element in enumerate(testtype_list):
print(" %d. %s" % (index + 1, element))
else:
print("No category specified.")
def show_subsystem_list(product_form):
print("List of currently supported subsystem names:")
parser = ParsePartsConfig(product_form)
subsystem_name_list = parser.get_subsystem_name_list()
if len(subsystem_name_list) == 0:
return
subsystem_name_list.sort()
for index, element in enumerate(subsystem_name_list):
print(" %d. %s" % (index + 1, element))
def show_partname_list(product_form):
print("List of currently supported part names:")
parser = ParsePartsConfig(product_form)
subsystem_name_list = parser.get_subsystem_name_list()
if len(subsystem_name_list) == 0:
return
subsystem_name_list.sort()
subsystem_infos = parser.get_subsystem_infos()
for subsystem in subsystem_name_list:
print("%s:" % subsystem)
part_name_list = subsystem_infos[subsystem]
part_name_list.sort()
for index, element in enumerate(part_name_list):
print(" %d. %s" % (index + 1, element))
def show_module_list(product_form):
print("List of currently supported module names:")
subsystem_name_list = []
subsystem_module_list = get_module_list(product_form)
for item in subsystem_module_list:
if item != "":
subsystem_name = item.split(os.sep)[0]
if subsystem_name not in subsystem_name_list:
subsystem_name_list.append(subsystem_name)
for subsystem_name in subsystem_name_list:
print("%s:" % subsystem_name)
index = 0
module_value_list = []
for item in subsystem_module_list:
find_key = subsystem_name + os.sep
pos_subsystem = item.find(find_key)
if pos_subsystem >= 0:
subsystem_module_dir = \
item[pos_subsystem + len(find_key):len(item)]
module_value = subsystem_module_dir.split(os.sep)[0]
if module_value not in module_value_list:
module_value_list.append(module_value)
index += 1
print(" %d. %s" % (index, module_value))
def display_help_command_info(command):
if command == ToolCommandType.TOOLCMD_KEY_SHOW:
print(SUPPORT_COMMAND_SHOW)
elif command == ToolCommandType.TOOLCMD_KEY_RUN:
        print(RUNCASES_INFORMATION)
elif command == ToolCommandType.TOOLCMD_KEY_LIST:
        print(LIST_INFORMATION)
elif command == ToolCommandType.TOOLCMD_KEY_QUIT:
        print(QUIT_INFORMATION)
else:
print("'%s' command no help information." % command)
def display_show_command_info(command, product_form="phone"):
if command == CMD_KEY_PRODUCTLIST:
show_product_list()
elif command == CMD_KEY_TYPELIST:
show_testtype_list()
elif command == CMD_KEY_SUBSYSTEMLIST:
show_subsystem_list(product_form)
elif command == CMD_KEY_PARTLIST:
show_partname_list(product_form)
elif command == CMD_KEY_MODULELIST:
show_module_list(product_form)
else:
print("This command is not support.")
#############################################################################
#############################################################################
|
the-stack_0_21848 | import tensorflow as tf
import numpy as np
from configs import args
MAXLEN = 400
def pipeline_train(enc_inp, labels, sess):
dataset = tf.data.Dataset.from_tensor_slices((enc_inp, labels))
dataset = dataset.shuffle(len(labels)).batch(args.batch_size)
iterator = dataset.make_initializable_iterator()
enc_inp_ph = tf.placeholder(tf.int32, [None, MAXLEN])
labels_ph = tf.placeholder(tf.int32, [None])
init_dict = {enc_inp_ph: enc_inp, labels_ph: labels}
sess.run(iterator.initializer, init_dict)
return iterator, init_dict
class DiscriminatorDataLoader(object):
def __init__(self, sess, vocab):
self.sess = sess
self.vocab = vocab
self.vocab_size = vocab.vocab_size
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.imdb.load_data(
num_words=args.vocab_size, index_from=4)
X = np.concatenate((X_train, X_test))
y = np.concatenate((y_train, y_test))
X = tf.keras.preprocessing.sequence.pad_sequences(
X, MAXLEN, truncating='pre', padding='post')
self.train_iterator, self.train_init_dict = pipeline_train(X, y, sess)
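# Usage sketch (assumes a graph-mode TF1-style tf.Session, matching the
# initializable iterator built above):
#
#   loader = DiscriminatorDataLoader(sess, vocab)
#   enc_inp, labels = sess.run(loader.train_iterator.get_next())
#   # re-initialise for another pass over the data:
#   sess.run(loader.train_iterator.initializer, loader.train_init_dict)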
|
the-stack_0_21849 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict, List, Optional
import torch
from pytext.config.component import create_loss
from pytext.loss import MSELoss
from pytext.utils.usage import log_class_usage
from .output_layer_base import OutputLayerBase
class RegressionOutputLayer(OutputLayerBase):
"""
Output layer for doc regression models. Currently only supports Mean Squared Error
loss.
Args:
loss (MSELoss): config for MSE loss
squash_to_unit_range (bool): whether to clamp the output to the range [0, 1],
via a sigmoid.
"""
class Config(OutputLayerBase.Config):
loss: MSELoss.Config = MSELoss.Config()
squash_to_unit_range: bool = False
@classmethod
def from_config(cls, config: Config):
return cls(create_loss(config.loss), config.squash_to_unit_range)
def __init__(self, loss_fn: MSELoss, squash_to_unit_range: bool = False) -> None:
super().__init__()
self.loss_fn = loss_fn
self.squash_to_unit_range = squash_to_unit_range
log_class_usage(__class__)
def get_loss(
self,
logit: torch.Tensor,
target: torch.Tensor,
context: Optional[Dict[str, Any]] = None,
reduce: bool = True,
) -> torch.Tensor:
"""
Compute regression loss from logits and targets.
Args:
logit (torch.Tensor): Logits returned :class:`~pytext.models.Model`.
target (torch.Tensor): True label/target to compute loss against.
context (Optional[Dict[str, Any]]): Context is a dictionary of items
that's passed as additional metadata by the
:class:`~pytext.data.DataHandler`. Defaults to None.
reduce (bool): Whether to reduce loss over the batch. Defaults to True.
Returns:
torch.Tensor: Model loss.
"""
logit, _ = self.get_pred(logit)
return self.loss_fn(logit, target, reduce)
def get_pred(self, logit, *args, **kwargs):
"""
Compute predictions and scores from the model (unlike in classification, where
prediction = "most likely class" and scores = "log probs", here these are the
same values). If `squash_to_unit_range` is True, fit prediction to [0, 1] via
a sigmoid.
Args:
logit (torch.Tensor): Logits returned from the model.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Model prediction and scores.
"""
prediction = logit.squeeze(dim=1)
if self.squash_to_unit_range:
prediction = torch.sigmoid(prediction)
return prediction, prediction
def torchscript_predictions(self):
return RegressionScores(self.squash_to_unit_range)
class RegressionScores(torch.jit.ScriptModule):
def __init__(self, squash_to_unit_range: bool):
super().__init__()
self.squash_to_unit_range = torch.jit.Attribute(squash_to_unit_range, bool)
@torch.jit.script_method
def forward(self, logits: torch.Tensor) -> List[float]:
# logits: B x 1, prediction: B
prediction = logits.squeeze(dim=1)
if self.squash_to_unit_range:
prediction = torch.sigmoid(prediction)
scores: List[float] = prediction.tolist()
return scores
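# Minimal usage sketch (illustrative only; the tensor values are made up and
# torch is assumed available as imported above):
if __name__ == "__main__":
    scorer = RegressionScores(squash_to_unit_range=True)
    dummy_logits = torch.tensor([[0.2], [-1.5], [3.0]])  # B x 1 logits
    print(scorer(dummy_logits))  # three floats squashed into [0, 1]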
|
the-stack_0_21851 | import os
import unittest.mock as mock
import lxml.etree
import vanir
import vanir.events
import vanir.tests
import vanir.tests.init
import vanir.tests.storage_reflink
class TestApp(vanir.tests.TestEmitter):
pass
class TC_20_VanirHost(vanir.tests.VanirTestCase):
sample_xc_domain_getinfo = [
{'paused': 0, 'cpu_time': 243951379111104, 'ssidref': 0,
'hvm': 0, 'shutdown_reason': 255, 'dying': 0,
'mem_kb': 3733212, 'domid': 0, 'max_vcpu_id': 7,
'crashed': 0, 'running': 1, 'maxmem_kb': 3734236,
'shutdown': 0, 'online_vcpus': 8,
'handle': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'cpupool': 0, 'blocked': 0},
{'paused': 0, 'cpu_time': 2849496569205, 'ssidref': 0,
'hvm': 0, 'shutdown_reason': 255, 'dying': 0,
'mem_kb': 303916, 'domid': 1, 'max_vcpu_id': 0,
'crashed': 0, 'running': 0, 'maxmem_kb': 308224,
'shutdown': 0, 'online_vcpus': 1,
'handle': [116, 174, 229, 207, 17, 1, 79, 39, 191, 37, 41,
186, 205, 158, 219, 8],
'cpupool': 0, 'blocked': 1},
{'paused': 0, 'cpu_time': 249658663079978, 'ssidref': 0,
'hvm': 0, 'shutdown_reason': 255, 'dying': 0,
'mem_kb': 3782668, 'domid': 11, 'max_vcpu_id': 7,
'crashed': 0, 'running': 0, 'maxmem_kb': 3783692,
'shutdown': 0, 'online_vcpus': 8,
'handle': [169, 95, 55, 127, 140, 94, 79, 220, 186, 210,
117, 5, 148, 11, 185, 206],
'cpupool': 0, 'blocked': 1}]
def setUp(self):
super(TC_20_VanirHost, self).setUp()
self.app = TestApp()
self.app.vmm = mock.Mock()
self.vanir_host = vanir.app.VanirHost(self.app)
def test_000_get_vm_stats_single(self):
self.app.vmm.configure_mock(**{
'xc.domain_getinfo.return_value': self.sample_xc_domain_getinfo
})
        info_time, info = self.vanir_host.get_vm_stats()
self.assertEqual(self.app.vmm.mock_calls, [
('xc.domain_getinfo', (0, 1024), {}),
])
self.assertIsNotNone(info_time)
expected_info = {
0: {
'cpu_time': 243951379111104//8,
'cpu_usage': 0,
'memory_kb': 3733212,
},
1: {
'cpu_time': 2849496569205,
'cpu_usage': 0,
'memory_kb': 303916,
},
11: {
'cpu_time': 249658663079978//8,
'cpu_usage': 0,
'memory_kb': 3782668,
},
}
self.assertEqual(info, expected_info)
def test_001_get_vm_stats_twice(self):
self.app.vmm.configure_mock(**{
'xc.domain_getinfo.return_value': self.sample_xc_domain_getinfo
})
        prev_time, prev_info = self.vanir_host.get_vm_stats()
prev_time -= 1
prev_info[0]['cpu_time'] -= 10**8
prev_info[1]['cpu_time'] -= 10**9
prev_info[11]['cpu_time'] -= 125 * 10**6
        info_time, info = self.vanir_host.get_vm_stats(prev_time, prev_info)
self.assertIsNotNone(info_time)
expected_info = {
0: {
'cpu_time': 243951379111104//8,
'cpu_usage': 9,
'memory_kb': 3733212,
},
1: {
'cpu_time': 2849496569205,
'cpu_usage': 99,
'memory_kb': 303916,
},
11: {
'cpu_time': 249658663079978//8,
'cpu_usage': 12,
'memory_kb': 3782668,
},
}
self.assertEqual(info, expected_info)
self.assertEqual(self.app.vmm.mock_calls, [
('xc.domain_getinfo', (0, 1024), {}),
('xc.domain_getinfo', (0, 1024), {}),
])
def test_002_get_vm_stats_one_vm(self):
self.app.vmm.configure_mock(**{
'xc.domain_getinfo.return_value': [self.sample_xc_domain_getinfo[1]]
})
        vm = mock.Mock()  # instantiate; setting attributes on the Mock class itself would leak between tests
vm.xid = 1
vm.name = 'somevm'
        info_time, info = self.vanir_host.get_vm_stats(only_vm=vm)
self.assertIsNotNone(info_time)
self.assertEqual(self.app.vmm.mock_calls, [
('xc.domain_getinfo', (1, 1), {}),
])
class TC_30_VMCollection(vanir.tests.VanirTestCase):
def setUp(self):
super().setUp()
self.app = TestApp()
self.vms = vanir.app.VMCollection(self.app)
self.testvm1 = vanir.tests.init.TestVM(
None, None, qid=1, name='testvm1')
self.testvm2 = vanir.tests.init.TestVM(
None, None, qid=2, name='testvm2')
self.addCleanup(self.cleanup_vmcollection)
def cleanup_vmcollection(self):
self.testvm1.close()
self.testvm2.close()
self.vms.close()
del self.testvm1
del self.testvm2
del self.vms
del self.app
def test_000_contains(self):
self.vms._dict = {1: self.testvm1}
self.assertIn(1, self.vms)
self.assertIn('testvm1', self.vms)
self.assertIn(self.testvm1, self.vms)
self.assertNotIn(2, self.vms)
self.assertNotIn('testvm2', self.vms)
self.assertNotIn(self.testvm2, self.vms)
def test_001_getitem(self):
self.vms._dict = {1: self.testvm1}
self.assertIs(self.vms[1], self.testvm1)
self.assertIs(self.vms['testvm1'], self.testvm1)
self.assertIs(self.vms[self.testvm1], self.testvm1)
def test_002_add(self):
self.vms.add(self.testvm1)
self.assertIn(1, self.vms)
self.assertEventFired(self.app, 'domain-add',
kwargs={'vm': self.testvm1})
with self.assertRaises(TypeError):
self.vms.add(object())
testvm_qid_collision = vanir.tests.init.TestVM(
None, None, name='testvm2', qid=1)
testvm_name_collision = vanir.tests.init.TestVM(
None, None, name='testvm1', qid=2)
with self.assertRaises(ValueError):
self.vms.add(testvm_qid_collision)
with self.assertRaises(ValueError):
self.vms.add(testvm_name_collision)
def test_003_qids(self):
self.vms.add(self.testvm1)
self.vms.add(self.testvm2)
self.assertCountEqual(self.vms.qids(), [1, 2])
self.assertCountEqual(self.vms.keys(), [1, 2])
def test_004_names(self):
self.vms.add(self.testvm1)
self.vms.add(self.testvm2)
self.assertCountEqual(self.vms.names(), ['testvm1', 'testvm2'])
def test_005_vms(self):
self.vms.add(self.testvm1)
self.vms.add(self.testvm2)
self.assertCountEqual(self.vms.vms(), [self.testvm1, self.testvm2])
self.assertCountEqual(self.vms.values(), [self.testvm1, self.testvm2])
def test_006_items(self):
self.vms.add(self.testvm1)
self.vms.add(self.testvm2)
self.assertCountEqual(self.vms.items(),
[(1, self.testvm1), (2, self.testvm2)])
def test_007_len(self):
self.vms.add(self.testvm1)
self.vms.add(self.testvm2)
self.assertEqual(len(self.vms), 2)
def test_008_delitem(self):
self.vms.add(self.testvm1)
self.vms.add(self.testvm2)
del self.vms['testvm2']
self.assertCountEqual(self.vms.vms(), [self.testvm1])
self.assertEventFired(self.app, 'domain-delete',
kwargs={'vm': self.testvm2})
def test_100_get_new_unused_qid(self):
self.vms.add(self.testvm1)
self.vms.add(self.testvm2)
self.vms.get_new_unused_qid()
# def test_200_get_vms_based_on(self):
# pass
# def test_201_get_vms_connected_to(self):
# pass
class TC_80_VanirInitialPools(vanir.tests.VanirTestCase):
def setUp(self):
super().setUp()
self.app = vanir.Vanir('/tmp/vanirtest.xml', load=False,
offline_mode=True)
self.test_dir = '/var/tmp/test-varlibqubes'
self.test_patch = mock.patch.dict(
vanir.config.defaults['pool_configs']['varlibqubes'],
{'dir_path': self.test_dir})
self.test_patch.start()
def tearDown(self):
self.test_patch.stop()
self.app.close()
del self.app
def get_driver(self, fs_type, accessible):
vanir.tests.storage_reflink.mkdir_fs(self.test_dir, fs_type,
accessible=accessible, cleanup_via=self.addCleanup)
self.app.load_initial_values()
varlibqubes = self.app.pools['varlibqubes']
self.assertEqual(varlibqubes.dir_path, self.test_dir)
return varlibqubes.driver
def test_100_varlibqubes_btrfs_accessible(self):
self.assertEqual(self.get_driver('btrfs', True), 'file-reflink')
def test_101_varlibqubes_btrfs_inaccessible(self):
self.assertEqual(self.get_driver('btrfs', False), 'file')
def test_102_varlibqubes_ext4_accessible(self):
self.assertEqual(self.get_driver('ext4', True), 'file')
def test_103_varlibqubes_ext4_inaccessible(self):
self.assertEqual(self.get_driver('ext4', False), 'file')
class TC_89_VanirEmpty(vanir.tests.VanirTestCase):
def tearDown(self):
try:
os.unlink('/tmp/vanirtest.xml')
except:
pass
try:
self.app.close()
del self.app
except AttributeError:
pass
super().tearDown()
@vanir.tests.skipUnlessDom0
def test_000_init_empty(self):
# pylint: disable=no-self-use,unused-variable,bare-except
try:
os.unlink('/tmp/vanirtest.xml')
except FileNotFoundError:
pass
vanir.Vanir.create_empty_store('/tmp/vanirtest.xml').close()
def test_100_property_migrate_default_fw_netvm(self):
xml_template = '''<?xml version="1.0" encoding="utf-8" ?>
<vanir version="3.0">
<properties>
<property name="default_netvm">{default_netvm}</property>
<property name="default_fw_netvm">{default_fw_netvm}</property>
</properties>
<labels>
<label id="label-1" color="#cc0000">red</label>
</labels>
<pools>
<pool driver="file" dir_path="/tmp/vanir-test" name="default"/>
</pools>
<domains>
<domain class="StandaloneVM" id="domain-1">
<properties>
<property name="qid">1</property>
<property name="name">sys-net</property>
<property name="provides_network">True</property>
<property name="label" ref="label-1" />
<property name="netvm"></property>
<property name="uuid">2fcfc1f4-b2fe-4361-931a-c5294b35edfa</property>
</properties>
<features/>
<devices class="pci"/>
</domain>
<domain class="StandaloneVM" id="domain-2">
<properties>
<property name="qid">2</property>
<property name="name">sys-firewall</property>
<property name="provides_network">True</property>
<property name="label" ref="label-1" />
<property name="uuid">9a6d9689-25f7-48c9-a15f-8205d6c5b7c6</property>
</properties>
</domain>
<domain class="StandaloneVM" id="domain-3">
<properties>
<property name="qid">3</property>
<property name="name">appvm</property>
<property name="label" ref="label-1" />
<property name="uuid">1d6aab41-3262-400a-b3d3-21aae8fdbec8</property>
</properties>
</domain>
</domains>
</vanir>
'''
with self.subTest('default_setup'):
with open('/tmp/vanirtest.xml', 'w') as xml_file:
xml_file.write(xml_template.format(
default_netvm='sys-firewall',
default_fw_netvm='sys-net'))
self.app = vanir.Vanir('/tmp/vanirtest.xml', offline_mode=True)
self.assertEqual(
self.app.domains['sys-net'].netvm, None)
self.assertEqual(
self.app.domains['sys-firewall'].netvm, self.app.domains['sys-net'])
# property is no longer "default"
self.assertFalse(
self.app.domains['sys-firewall'].property_is_default('netvm'))
# verify that appvm.netvm is unaffected
self.assertTrue(
self.app.domains['appvm'].property_is_default('netvm'))
self.assertEqual(
self.app.domains['appvm'].netvm,
self.app.domains['sys-firewall'])
with self.assertRaises(AttributeError):
self.app.default_fw_netvm
self.app.close()
del self.app
with self.subTest('same'):
with open('/tmp/vanirtest.xml', 'w') as xml_file:
xml_file.write(xml_template.format(
default_netvm='sys-net',
default_fw_netvm='sys-net'))
self.app = vanir.Vanir('/tmp/vanirtest.xml', offline_mode=True)
self.assertEqual(
self.app.domains['sys-net'].netvm, None)
self.assertEqual(
self.app.domains['sys-firewall'].netvm,
self.app.domains['sys-net'])
self.assertTrue(
self.app.domains['sys-firewall'].property_is_default('netvm'))
# verify that appvm.netvm is unaffected
self.assertTrue(
self.app.domains['appvm'].property_is_default('netvm'))
self.assertEqual(
self.app.domains['appvm'].netvm,
self.app.domains['sys-net'])
with self.assertRaises(AttributeError):
self.app.default_fw_netvm
with self.subTest('loop'):
with open('/tmp/vanirtest.xml', 'w') as xml_file:
xml_file.write(xml_template.format(
default_netvm='sys-firewall',
default_fw_netvm='sys-firewall'))
self.app = vanir.Vanir('/tmp/vanirtest.xml', offline_mode=True)
self.assertEqual(
self.app.domains['sys-net'].netvm, None)
# this was netvm loop, better set to none, to not crash qubesd
self.assertEqual(
self.app.domains['sys-firewall'].netvm, None)
self.assertFalse(
self.app.domains['sys-firewall'].property_is_default('netvm'))
# verify that appvm.netvm is unaffected
self.assertTrue(
self.app.domains['appvm'].property_is_default('netvm'))
self.assertEqual(
self.app.domains['appvm'].netvm,
self.app.domains['sys-firewall'])
with self.assertRaises(AttributeError):
self.app.default_fw_netvm
class TC_90_Vanir(vanir.tests.VanirTestCase):
def tearDown(self):
try:
os.unlink('/tmp/vanirtest.xml')
except:
pass
super().tearDown()
def setUp(self):
super(TC_90_Vanir, self).setUp()
self.app = vanir.Vanir('/tmp/vanirtest.xml', load=False,
offline_mode=True)
self.addCleanup(self.cleanup_vanir)
self.app.load_initial_values()
self.template = self.app.add_new_vm('TemplateVM', name='test-template',
label='green')
def cleanup_vanir(self):
self.app.close()
del self.app
try:
del self.template
except AttributeError:
pass
def test_100_clockvm(self):
appvm = self.app.add_new_vm('AppVM', name='test-vm', template=self.template,
label='red')
self.assertIsNone(self.app.clockvm)
self.assertNotIn('service.clocksync', appvm.features)
self.assertNotIn('service.clocksync', self.template.features)
self.app.clockvm = appvm
self.assertIn('service.clocksync', appvm.features)
self.assertTrue(appvm.features['service.clocksync'])
self.app.clockvm = self.template
self.assertNotIn('service.clocksync', appvm.features)
self.assertIn('service.clocksync', self.template.features)
self.assertTrue(self.template.features['service.clocksync'])
def test_110_netvm_loop(self):
'''Netvm loop through default_netvm'''
netvm = self.app.add_new_vm('AppVM', name='test-net',
template=self.template, label='red')
try:
self.app.default_netvm = None
netvm.netvm = vanir.property.DEFAULT
with self.assertRaises(ValueError):
self.app.default_netvm = netvm
finally:
del netvm
def test_111_netvm_loop(self):
'''Netvm loop through default_netvm'''
netvm = self.app.add_new_vm('AppVM', name='test-net',
template=self.template, label='red')
try:
netvm.netvm = None
self.app.default_netvm = netvm
with self.assertRaises(ValueError):
netvm.netvm = vanir.property.DEFAULT
finally:
del netvm
def test_200_remove_template(self):
appvm = self.app.add_new_vm('AppVM', name='test-vm',
template=self.template,
label='red')
with mock.patch.object(self.app, 'vmm'):
with self.assertRaises(vanir.exc.VanirException):
del self.app.domains[self.template]
def test_201_remove_netvm(self):
netvm = self.app.add_new_vm('AppVM', name='test-netvm',
template=self.template, provides_network=True,
label='red')
appvm = self.app.add_new_vm('AppVM', name='test-vm',
template=self.template,
label='red')
appvm.netvm = netvm
with mock.patch.object(self.app, 'vmm'):
with self.assertRaises(vanir.exc.VanirVMInUseError):
del self.app.domains[netvm]
def test_202_remove_default_netvm(self):
netvm = self.app.add_new_vm('AppVM', name='test-netvm',
template=self.template, provides_network=True,
label='red')
netvm.netvm = None
self.app.default_netvm = netvm
with mock.patch.object(self.app, 'vmm'):
with self.assertRaises(vanir.exc.VanirVMInUseError):
del self.app.domains[netvm]
def test_203_remove_default_dispvm(self):
appvm = self.app.add_new_vm('AppVM', name='test-appvm',
template=self.template,
label='red')
self.app.default_dispvm = appvm
with mock.patch.object(self.app, 'vmm'):
with self.assertRaises(vanir.exc.VanirVMInUseError):
del self.app.domains[appvm]
def test_204_remove_appvm_dispvm(self):
dispvm = self.app.add_new_vm('AppVM', name='test-appvm',
template=self.template,
label='red')
appvm = self.app.add_new_vm('AppVM', name='test-appvm2',
template=self.template, default_dispvm=dispvm,
label='red')
with mock.patch.object(self.app, 'vmm'):
with self.assertRaises(vanir.exc.VanirVMInUseError):
del self.app.domains[dispvm]
def test_205_remove_appvm_dispvm(self):
appvm = self.app.add_new_vm('AppVM', name='test-appvm',
template=self.template, template_for_dispvms=True,
label='red')
dispvm = self.app.add_new_vm('DispVM', name='test-dispvm',
template=appvm,
label='red')
with mock.patch.object(self.app, 'vmm'):
with self.assertRaises(vanir.exc.VanirVMInUseError):
del self.app.domains[appvm]
@vanir.tests.skipUnlessGit
def test_900_example_xml_in_doc(self):
self.assertXMLIsValid(
lxml.etree.parse(open(
os.path.join(vanir.tests.in_git, 'doc/example.xml'), 'rb')),
'vanir.rng')
|
the-stack_0_21856 | # Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import falcon
from oslo_log import log as logging
import six
from zaqar.common import decorators
from zaqar.i18n import _
from zaqar.storage import errors as storage_errors
from zaqar.transport import acl
from zaqar.transport import utils
from zaqar.transport import validation
from zaqar.transport.wsgi import errors as wsgi_errors
from zaqar.transport.wsgi import utils as wsgi_utils
LOG = logging.getLogger(__name__)
class ItemResource(object):
__slots__ = ('_validate', '_queue_controller', '_message_controller')
def __init__(self, validate, queue_controller, message_controller):
self._validate = validate
self._queue_controller = queue_controller
self._message_controller = message_controller
@decorators.TransportLog("Queue metadata")
@acl.enforce("queues:get")
def on_get(self, req, resp, project_id, queue_name):
try:
resp_dict = self._queue_controller.get(queue_name,
project=project_id)
tmp = self._validate.get_limit_conf_value('max_messages_post_size')
queue_max_msg_size = resp_dict.get('_max_messages_post_size', tmp)
resp_dict['_max_messages_post_size'] = queue_max_msg_size
tmp = self._validate.get_limit_conf_value('default_message_ttl')
queue_default_ttl = resp_dict.get('_default_message_ttl', tmp)
resp_dict['_default_message_ttl'] = queue_default_ttl
except storage_errors.DoesNotExist as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPNotFound(six.text_type(ex))
except Exception as ex:
LOG.exception(ex)
description = _(u'Queue metadata could not be retrieved.')
raise wsgi_errors.HTTPServiceUnavailable(description)
resp.body = utils.to_json(resp_dict)
# status defaults to 200
@decorators.TransportLog("Queue item")
@acl.enforce("queues:create")
def on_put(self, req, resp, project_id, queue_name):
try:
# Place JSON size restriction before parsing
self._validate.queue_metadata_length(req.content_length)
# Deserialize queue metadata
metadata = None
if req.content_length:
document = wsgi_utils.deserialize(req.stream,
req.content_length)
metadata = wsgi_utils.sanitize(document, spec=None)
self._validate.queue_metadata_putting(metadata)
except validation.ValidationFailed as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
try:
created = self._queue_controller.create(queue_name,
metadata=metadata,
project=project_id)
except storage_errors.FlavorDoesNotExist as ex:
LOG.exception(ex)
raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
except Exception as ex:
LOG.exception(ex)
description = _(u'Queue could not be created.')
raise wsgi_errors.HTTPServiceUnavailable(description)
resp.status = falcon.HTTP_201 if created else falcon.HTTP_204
resp.location = req.path
@decorators.TransportLog("Queue item")
@acl.enforce("queues:delete")
def on_delete(self, req, resp, project_id, queue_name):
LOG.debug(u'Queue item DELETE - queue: %(queue)s, '
u'project: %(project)s',
{'queue': queue_name, 'project': project_id})
try:
self._queue_controller.delete(queue_name, project=project_id)
except Exception as ex:
LOG.exception(ex)
description = _(u'Queue could not be deleted.')
raise wsgi_errors.HTTPServiceUnavailable(description)
resp.status = falcon.HTTP_204
@acl.enforce("queues:update")
def on_patch(self, req, resp, project_id, queue_name):
"""Allows one to update a queue's metadata.
This method expects the user to submit a JSON object. There is also
strict format checking through the use of
jsonschema. Appropriate errors are returned in each case for
badly formatted input.
:returns: HTTP | 200,400,409,503
"""
LOG.debug(u'PATCH queue - name: %s', queue_name)
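        # Illustrative request shape (hypothetical values, not taken from this
        # codebase): the client sends
        #   Content-Type: application/openstack-messaging-v2.0-json-patch
        # with a JSON-patch style body such as
        #   [{"op": "replace", "path": "/metadata/some_key", "value": 300}]
        # which is validated below and applied through the _do_replace /
        # _do_add / _do_remove helpers.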
try:
# Place JSON size restriction before parsing
self._validate.queue_metadata_length(req.content_length)
except validation.ValidationFailed as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPBadRequestBody(six.text_type(ex))
# NOTE(flwang): See below link to get more details about draft 10,
# tools.ietf.org/html/draft-ietf-appsawg-json-patch-10
content_types = {
'application/openstack-messaging-v2.0-json-patch': 10,
}
if req.content_type not in content_types:
headers = {'Accept-Patch':
', '.join(sorted(content_types.keys()))}
msg = _("Accepted media type for PATCH: %s.")
LOG.debug(msg % headers)
raise wsgi_errors.HTTPUnsupportedMediaType(msg % headers)
if req.content_length:
try:
changes = utils.read_json(req.stream, req.content_length)
changes = wsgi_utils.sanitize(changes,
spec=None, doctype=list)
except utils.MalformedJSON as ex:
LOG.debug(ex)
description = _(u'Request body could not be parsed.')
raise wsgi_errors.HTTPBadRequestBody(description)
except utils.OverflowedJSONInteger as ex:
LOG.debug(ex)
description = _(u'JSON contains integer that is too large.')
raise wsgi_errors.HTTPBadRequestBody(description)
except Exception as ex:
# Error while reading from the network/server
LOG.exception(ex)
description = _(u'Request body could not be read.')
raise wsgi_errors.HTTPServiceUnavailable(description)
else:
msg = _("PATCH body could not be empty for update.")
LOG.debug(msg)
raise wsgi_errors.HTTPBadRequestBody(msg)
try:
changes = self._validate.queue_patching(req, changes)
# NOTE(Eva-i): using 'get_metadata' instead of 'get', so
# QueueDoesNotExist error will be thrown in case of non-existent
# queue.
metadata = self._queue_controller.get_metadata(queue_name,
project=project_id)
for change in changes:
change_method_name = '_do_%s' % change['op']
change_method = getattr(self, change_method_name)
change_method(req, metadata, change)
self._validate.queue_metadata_putting(metadata)
self._queue_controller.set_metadata(queue_name,
metadata,
project_id)
except storage_errors.DoesNotExist as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPNotFound(six.text_type(ex))
except validation.ValidationFailed as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPBadRequestBody(six.text_type(ex))
except wsgi_errors.HTTPConflict as ex:
raise ex
except Exception as ex:
LOG.exception(ex)
description = _(u'Queue could not be updated.')
raise wsgi_errors.HTTPServiceUnavailable(description)
resp.body = utils.to_json(metadata)
def _do_replace(self, req, metadata, change):
path = change['path']
path_child = path[1]
value = change['value']
if path_child in metadata:
metadata[path_child] = value
else:
msg = _("Can't replace non-existent object %s.")
raise wsgi_errors.HTTPConflict(msg % path_child)
def _do_add(self, req, metadata, change):
path = change['path']
path_child = path[1]
value = change['value']
metadata[path_child] = value
def _do_remove(self, req, metadata, change):
path = change['path']
path_child = path[1]
if path_child in metadata:
metadata.pop(path_child)
else:
msg = _("Can't remove non-existent object %s.")
raise wsgi_errors.HTTPConflict(msg % path_child)
class CollectionResource(object):
__slots__ = ('_queue_controller', '_validate')
def __init__(self, validate, queue_controller):
self._queue_controller = queue_controller
self._validate = validate
@decorators.TransportLog("Queue collection")
@acl.enforce("queues:get_all")
def on_get(self, req, resp, project_id):
kwargs = {}
# NOTE(kgriffs): This syntax ensures that
# we don't clobber default values with None.
req.get_param('marker', store=kwargs)
req.get_param_as_int('limit', store=kwargs)
req.get_param_as_bool('detailed', store=kwargs)
try:
self._validate.queue_listing(**kwargs)
results = self._queue_controller.list(project=project_id, **kwargs)
# Buffer list of queues
queues = list(next(results))
except validation.ValidationFailed as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
except Exception as ex:
LOG.exception(ex)
description = _(u'Queues could not be listed.')
raise wsgi_errors.HTTPServiceUnavailable(description)
# Got some. Prepare the response.
kwargs['marker'] = next(results) or kwargs.get('marker', '')
for each_queue in queues:
each_queue['href'] = req.path + '/' + each_queue['name']
links = []
if queues:
links = [
{
'rel': 'next',
'href': req.path + falcon.to_query_str(kwargs)
}
]
response_body = {
'queues': queues,
'links': links
}
resp.body = utils.to_json(response_body)
# status defaults to 200
|
the-stack_0_21857 | #!/usr/bin/python
# -'''- coding: utf-8 -'''-
from glob import glob
import os
import subprocess
from PySide.QtCore import *
from PySide.QtGui import *
import BasketBuilder
import BasketGlobals as config
class WindowLayout(QTabWidget):
# Define Emitter Signals
launch = Signal(int, str)
createnew = Signal(int)
openasset = Signal(str)
newasset = Signal(str)
# renderscene = Signal(int, str, str)
def __init__(self, parent=None):
super(WindowLayout, self).__init__(parent)
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# TABS
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
self.tabAssets = QWidget()
self.tabShots = QWidget()
self.tabMisc = QWidget()
self.addTab(self.tabShots, "tabShots")
self.addTab(self.tabAssets, "tabAssets")
self.addTab(self.tabMisc, "tabMisc")
self.setTabText(0, "Shots")
self.setTabText(1, "Assets")
self.setTabText(2, "Misc")
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# SHOTS
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# S3 INPUTS
self.label_scene = QLabel('Scene')
self.label_scene.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.label_shot = QLabel('Shot')
self.label_shot.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.dropdown_scene = QComboBox()
self.dropdown_scene.setMinimumWidth(100)
self.dropdown_shot = QComboBox()
self.dropdown_shot.setMinimumWidth(100)
# S3 LAYOUT
hbox_scene = QHBoxLayout()
hbox_scene.addWidget(self.label_scene)
hbox_scene.addWidget(self.dropdown_scene)
hbox_shot = QHBoxLayout()
hbox_shot.addWidget(self.label_shot)
hbox_shot.addWidget(self.dropdown_shot)
# MISC WIDGETS
self.label_options = QLabel('Options')
self.label_tag = QLabel('Tag')
self.dropdown_tag = QComboBox()
self.label_stage = QLabel('Stage')
self.dropdown_stage = QComboBox()
self.dropdown_stage.setMinimumWidth(100)
for i_stage, t_stage in enumerate(config.STAGE_DIRS):
self.dropdown_stage.addItem(t_stage)
# MISC LAYOUT
vbox_tag = QVBoxLayout()
vbox_tag.addWidget(self.label_tag)
vbox_tag.addWidget(self.dropdown_tag)
vbox_stage = QVBoxLayout()
vbox_stage.addWidget(self.label_stage)
vbox_stage.addWidget(self.dropdown_stage)
# LAUNCH BUTTONS
self.btn_launch = QPushButton('Launch Existing...')
self.btn_create = QPushButton('Create New...')
# self.label_render = QLabel('Make Sure Your Camera is Set in the Render Settings!')
# self.label_camera = QLabel('Alt Camera:')
# self.text_camera = QLineEdit()
# self.btn_render = QPushButton('Render Draft')
# Check if there is an existing file
self.updateDB()
self.dropdown_scene.currentIndexChanged.connect(self.updateShotList)
self.dropdown_stage.currentIndexChanged.connect(self.updateEnv)
self.dropdown_shot.currentIndexChanged.connect(self.updateEnv)
# LAUNCH SIGNALS
self.btn_launch.clicked.connect(self.emitlaunch)
# self.btn_launch.clicked.connect(QCoreApplication.instance().quit)
self.btn_create.clicked.connect(self.emitcreate)
# self.btn_create.clicked.connect(QCoreApplication.instance().quit)
# self.btn_render.clicked.connect(self.emitrender)
# APP LAYOUT
layout = QVBoxLayout()
appWrapper = QHBoxLayout()
leftColumn = QVBoxLayout()
leftUpper = QVBoxLayout()
leftUpper.addLayout(hbox_scene)
leftUpper.addLayout(hbox_shot)
leftUpper.addStretch(3)
leftUpper.setContentsMargins(20, 20, 20, 20)
leftLower = QVBoxLayout()
leftLower.addWidget(self.btn_launch)
leftLower.addWidget(self.btn_create)
leftLower.setContentsMargins(20, 0, 20, 0)
leftColumn.addLayout(leftUpper)
leftColumn.addLayout(leftLower)
rightColumn = QVBoxLayout()
rightColumn.addWidget(self.label_options)
rightColumn.addLayout(vbox_tag)
rightColumn.addLayout(vbox_stage)
rightColumn.addStretch(3)
bottomRow = QVBoxLayout()
line = QFrame()
line.setFrameStyle(QFrame.HLine | QFrame.Sunken)
line.setLineWidth(1)
line.setMidLineWidth(1)
# bottomRow.addWidget(line)
# bottomContent = QVBoxLayout()
# camLayout = QHBoxLayout()
# camLayout.addWidget(self.label_camera)
# camLayout.addWidget(self.text_camera)
# bottomContent.addLayout(camLayout)
# bottomContent.addWidget(self.label_render)
# bottomContent.addWidget(self.btn_render)
# bottomContent.setContentsMargins(0,20,0,20)
#
# bottomRow.addLayout(bottomContent)
appWrapper.addLayout(leftColumn)
appWrapper.addLayout(rightColumn)
layout.addLayout(appWrapper)
# layout.addLayout(bottomRow)
self.tabShots.setLayout(layout)
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# ASSETS
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
self.btn_browse = QPushButton("Browse Assets")
self.btn_AssetLaunch = QPushButton("Launch")
self.label_Directory = QLabel("Directory:")
self.label_Directory.setAlignment(Qt.AlignLeft | Qt.AlignTop)
self.text_Directory = QLineEdit()
self.label_AssetName = QLabel('Name: ')
self.text_AssetName = QLineEdit()
self.btn_NewAsset = QPushButton('New Asset')
self.btn_AssetLaunch.clicked.connect(self.launchAsset)
self.btn_browse.clicked.connect(self.browseAssets)
self.btn_NewAsset.clicked.connect(self.launchNewAsset)
assetLayout = QVBoxLayout()
inputLayout = QVBoxLayout()
buttonLayout = QHBoxLayout()
inputLayout.addWidget(self.label_Directory)
inputLayout.addWidget(self.text_Directory)
buttonLayout.addWidget(self.btn_browse)
buttonLayout.addWidget(self.btn_AssetLaunch)
inputLayout.addLayout(buttonLayout)
inputLayout.addStretch(3)
inputLayout.setContentsMargins(0, 20, 0, 20)
newInput = QHBoxLayout()
newInput.addWidget(self.label_AssetName)
newInput.addWidget(self.text_AssetName)
newassetLayout = QVBoxLayout()
newassetLayout.addLayout(newInput)
newassetLayout.addWidget(self.btn_NewAsset)
newassetLayout.addStretch(3)
newassetLayout.setContentsMargins(0, 20, 0, 20)
assetLayout.addLayout(inputLayout)
assetLayout.addWidget(line)
assetLayout.addLayout(newassetLayout)
self.tabAssets.setLayout(assetLayout)
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# MISC
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
self.label_Browser = QLabel("LAW Server Folder: ")
self.btn_Folder = QPushButton("Open")
self.btn_Folder.clicked.connect(self.openExplorer)
self.link_WebLogin = QLabel()
self.link_Trello = QLabel()
self.link_WebLogin.setText('''<a href="http://lobstersare.online/wp-login.php">Site Login</a>''')
self.link_WebLogin.setOpenExternalLinks(True)
self.link_Trello.setText('''<a href="https://trello.com/b/OEhZ5SGb">Trello Board</a>''')
self.link_Trello.setOpenExternalLinks(True)
miscLayout = QVBoxLayout()
folderLayout = QHBoxLayout()
linkLayout = QVBoxLayout()
folderLayout.addWidget(self.label_Browser)
folderLayout.addWidget(self.btn_Folder)
linkLayout.addWidget(self.link_WebLogin)
linkLayout.addWidget(self.link_Trello)
linkLayout.addStretch(3)
miscLayout.addLayout(folderLayout)
miscLayout.addLayout(linkLayout)
self.tabMisc.setLayout(miscLayout)
def openExplorer(self):
subprocess.Popen(r'explorer \\awexpress.westphal.drexel.edu\digm_anfx\SRPJ_LAW')
def browseAssets(self):
assetFile = QFileDialog.getOpenFileName(self,
"Open Asset",
os.path.join(config.serverDir(), 'working', 'assets'),
)
correctedPath = assetFile[0].replace('//', '\\\\').replace('/', '\\')
self.text_Directory.setText(correctedPath)
def launchAsset(self):
if self.text_Directory.text() != '':
self.openasset.emit(self.text_Directory.text())
def launchNewAsset(self):
if self.text_AssetName.text() != '':
self.newasset.emit(self.text_AssetName.text())
self.text_AssetName.clear()
def updateDB(self):
self.updateSceneList()
self.updateShotList()
def updateSceneList(self):
BAD_DIRS = ['assets', 'animatic']
self.dropdown_scene.clear()
dirs = next(os.walk(os.path.join(config.serverDir(), 'working', 'scenes')))[1]
dirs.sort()
for dirname in dirs:
if dirname not in BAD_DIRS:
self.dropdown_scene.addItem(dirname)
config.setSeq(self.dropdown_scene.currentText())
def updateShotList(self):
config.setSeq(self.dropdown_scene.currentText())
self.dropdown_shot.clear()
if os.getenv('SEQ') != '':
for i_shot, t_shot in enumerate(sorted(next(os.walk(os.path.join(config.serverDir(), 'working', 'scenes', os.getenv('SEQ'))))[1])):
self.dropdown_shot.addItem(t_shot)
config.setShot(self.dropdown_shot.currentText())
self.updateTags()
else:
self.canLaunch()
def emitlaunch(self):
# Return the stage index to the launcher, add one to compensate for zero-based index
config.setStage(self.getStageIndex())
self.launch.emit(self.getStageIndex(), self.dropdown_tag.currentText())
def emitcreate(self):
config.setStage(self.getStageIndex())
self.createnew.emit(self.getStageIndex())
def emitrender(self):
config.setStage(self.getStageIndex())
self.renderscene.emit(self.getStageIndex(), self.dropdown_tag.currentText(), self.text_camera.text())
def getTags(self):
# Grab all the files in given stage directory, unbiased of file type
files = glob(os.path.join(config.stageDir(self.getStageIndex()), '*.*'))
sort = []
for i, n in enumerate(files):
# Add all found file variables to a list
filename, ext = os.path.splitext(n)
# print ext
splt = os.path.basename(n).split('_')
            if len(splt) >= 3:  # need at least three tokens so splt[2] exists
sort.append(splt[2])
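                    # Assumed naming convention (illustrative, not enforced
                    # here): files like "<scene>_<shot>_<tag>_<version>.<ext>",
                    # so the third underscore token is the tag collected here.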
# Sets are DISTINCT objects, no repeats, removes duplicate names
distinct = set(sort)
return distinct
def updateTags(self):
self.dropdown_tag.clear()
for i_tag, t_tag in enumerate(self.getTags()):
self.dropdown_tag.addItem(t_tag)
# Whenever tags update, we need to update whether or not there is existing file
self.canLaunch()
def getStageIndex(self):
return int(self.dropdown_stage.currentIndex())
def canLaunch(self):
if self.dropdown_tag.count() >= 1:
self.btn_launch.setEnabled(True)
# self.btn_render.setEnabled(True)
else:
self.btn_launch.setDisabled(True)
# self.btn_render.setDisabled(True)
def updateEnv(self):
if self.dropdown_shot.currentText() != '':
config.setShot(self.dropdown_shot.currentText())
self.updateTags()
class QDialog_FolderCreate(QDialog):
sendirs = Signal(str, str)
def __init__(self, parent=None):
super(QDialog_FolderCreate, self).__init__(parent)
self.sceneLabel = QLabel("Scene: ")
self.sceneLabel.setMinimumWidth(40)
self.sceneName = QLineEdit()
self.sceneName.setMaximumWidth(150)
self.sceneName.setPlaceholderText("Type Scene Here...")
self.shotLabel = QLabel("Shot: ")
self.shotLabel.setMinimumWidth(40)
self.shotName = QLineEdit()
self.shotName.setMaximumWidth(150)
self.shotName.setPlaceholderText("Type Shot Here...")
self.submitbtn = QPushButton("Create")
self.quitbtn = QPushButton("Quit")
self.quitbtn.clicked.connect(self.close)
hbox_Scene = QHBoxLayout()
hbox_Scene.addWidget(self.sceneLabel)
hbox_Scene.addWidget(self.sceneName)
hbox_Scene.addStretch(1)
hbox_Shot = QHBoxLayout()
hbox_Shot.addWidget(self.shotLabel)
hbox_Shot.addWidget(self.shotName)
hbox_Shot.addStretch(1)
hbox_Cmd = QHBoxLayout()
hbox_Cmd.addStretch(1)
hbox_Cmd.addWidget(self.submitbtn)
hbox_Cmd.addWidget(self.quitbtn)
# Create layout and add widgets
layout = QVBoxLayout()
layout.addLayout(hbox_Scene)
layout.addLayout(hbox_Shot)
layout.addLayout(hbox_Cmd)
# Set dialog layout
self.setLayout(layout)
# Add submitbtn signal
self.submitbtn.clicked.connect(self.pressbtn)
def pressbtn(self):
self.sendirs.emit(self.sceneName.text(), self.shotName.text())
self.sceneName.clear()
self.shotName.clear()
class Launcher(QMainWindow):
def __init__(self, parent=None):
super(Launcher, self).__init__(parent)
self.mainlayout = WindowLayout()
self.initUI()
def initUI(self):
# CREATE MENU BAR
exitAction = QAction('&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(self.close)
shotAction = QAction('&Create Shot', self)
shotAction.setStatusTip('Build out folder structure for a new shot')
shotAction.triggered.connect(self.create_dir)
syncAction = QAction('&Sync Project', self)
syncAction.setStatusTip('Sync Local Project with Server')
syncAction.triggered.connect(self.synclocal)
self.statusBar()
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
buildMenu = menubar.addMenu('&Build')
fileMenu.addAction(exitAction)
buildMenu.addAction(shotAction)
buildMenu.addAction(syncAction)
self.setCentralWidget(self.mainlayout)
self.show()
def create_dir(self):
self.modalFolder = QDialog_FolderCreate()
self.modalFolder.setWindowTitle('Create')
self.modalFolder.show()
self.modalFolder.sendirs.connect(self.send_to_make)
def synclocal(self):
BasketBuilder.rep_prod_dir()
@Slot(str, str)
def send_to_make(self, scene, shot):
BasketBuilder.make_prod_dir(scene, shot)
BasketBuilder.make_frame_dir(scene, shot)
self.mainlayout.updateDB() |
the-stack_0_21858 | import torch.nn as nn
def accuracy(pred, target, topk=1):
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk,)
return_single = True
else:
return_single = False
maxk = max(topk)
_, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t()
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
class Accuracy(nn.Module):
def __init__(self, topk=(1,)):
super().__init__()
self.topk = topk
def forward(self, pred, target):
return accuracy(pred, target, self.topk)
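# Minimal usage sketch (illustrative; random data, assumes torch is installed):
if __name__ == "__main__":
    import torch
    pred = torch.randn(8, 5)                  # batch of 8 samples, 5 classes
    target = torch.randint(0, 5, (8,))
    top1, top5 = accuracy(pred, target, topk=(1, 5))
    print(top1.item(), top5.item())           # percentages in [0, 100]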
|
the-stack_0_21859 | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet replace-by-fee capabilities in conjunction with the fallbackfee."""
from test_framework.test_framework import BitcoinReloadTestFramework
from test_framework.util import assert_raises_rpc_error
class WalletRBFTest(BitcoinReloadTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].generate(101)
# sending a transaction without fee estimations must be possible by default on regtest
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
# test sending a tx with disabled fallback fee (must fail)
self.restart_node(0, extra_args=["-fallbackfee=0"])
assert_raises_rpc_error(-6, "Fee estimation failed", lambda: self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1))
assert_raises_rpc_error(-4, "Fee estimation failed", lambda: self.nodes[0].fundrawtransaction(self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})))
assert_raises_rpc_error(-6, "Fee estimation failed", lambda: self.nodes[0].sendmany("", {self.nodes[0].getnewaddress(): 1}))
if __name__ == '__main__':
WalletRBFTest().main()
|
the-stack_0_21861 | from torch.utils.data import Dataset
class MyTensorDataset(Dataset):
def __init__(self, *tensors, transforms=None):
        if transforms:
            assert tensors[0][0].dim() == 3  # Only Images (C x H x W) for now
        self.transforms = transforms  # always set, so __getitem__ can safely check it
assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
self.tensors = tensors
def __getitem__(self, index):
res = [tensor[index] for tensor in self.tensors]
if self.transforms:
res[0] = self.transforms(res[0])
return tuple(res)
def __len__(self):
return self.tensors[0].size(0)
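# Minimal usage sketch (illustrative; shapes and labels are made up, torch is
# assumed installed):
if __name__ == "__main__":
    import torch
    images = torch.rand(10, 3, 32, 32)        # N x C x H x W image tensor
    labels = torch.randint(0, 2, (10,))
    dataset = MyTensorDataset(images, labels)
    img, lbl = dataset[0]
    print(len(dataset), img.shape, lbl)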
|
the-stack_0_21862 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# This file contains google utils: https://cloud.google.com/storage/docs/reference/libraries
# pip install --upgrade google-cloud-storage
# from google.cloud import storage
import os
import platform
import time
from pathlib import Path
def attempt_download(weights):
# Attempt to download pretrained weights if not found locally
weights = weights.strip().replace("'", '')
msg = weights + ' missing'
r = 1 # return
if len(weights) > 0 and not os.path.isfile(weights):
d = {'': '',
}
file = Path(weights).name
if file in d:
r = gdrive_download(id=d[file], name=weights)
if not (r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6): # weights exist and > 1MB
os.remove(weights) if os.path.exists(weights) else None # remove partial downloads
s = 'curl -L -o %s "storage.googleapis.com/%s"' % (weights, file)
r = os.system(s) # execute, capture return values
# Error check
if not (r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6): # weights exist and > 1MB
os.remove(weights) if os.path.exists(weights) else None # remove partial downloads
raise Exception(msg)
def gdrive_download(id='1n_oKgR81BJtqk75b00eAjdv03qVCQn2f', name='coco128.zip'):
# Downloads a file from Google Drive, accepting presented query
# from utils.google_utils import *; gdrive_download()
t = time.time()
print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... ' % (id, name), end='')
os.remove(name) if os.path.exists(name) else None # remove existing
os.remove('cookie') if os.path.exists('cookie') else None
# Attempt file download
out = "NUL" if platform.system() == "Windows" else "/dev/null"
os.system('curl -c ./cookie -s -L "drive.google.com/uc?export=download&id=%s" > %s ' % (id, out))
if os.path.exists('cookie'): # large file
s = 'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm=%s&id=%s" -o %s' % (get_token(), id, name)
else: # small file
s = 'curl -s -L -o %s "drive.google.com/uc?export=download&id=%s"' % (name, id)
r = os.system(s) # execute, capture return values
os.remove('cookie') if os.path.exists('cookie') else None
# Error check
if r != 0:
os.remove(name) if os.path.exists(name) else None # remove partial
print('Download error ') # raise Exception('Download error')
return r
# Unzip if archive
if name.endswith('.zip'):
print('unzipping... ', end='')
os.system('unzip -q %s' % name) # unzip
os.remove(name) # remove zip to free space
print('Done (%.1fs)' % (time.time() - t))
return r
def get_token(cookie="./cookie"):
with open(cookie) as f:
for line in f:
if "download" in line:
return line.split()[-1]
return ""
|
the-stack_0_21865 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.contrib import admin
from .. import settings
from ..fields import folder
class PermissionAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': (('type', 'folder',))}),
(None, {'fields': (('user', 'group', 'everybody'),)}),
(None, {'fields': (
('can_edit', 'can_read', 'can_add_children')
)}),
)
raw_id_fields = ('user', 'group',)
list_filter = ['user']
list_display = ['__str__', 'folder', 'user']
def formfield_for_foreignkey(self, db_field, request, **kwargs):
db = kwargs.get('using')
if db_field.name == 'folder':
kwargs['widget'] = folder.AdminFolderWidget(
db_field.remote_field, self.admin_site, using=db
)
return super(PermissionAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs
)
def get_model_perms(self, request):
# don't display the permissions admin if permissions are disabled.
# This method is easier for testing than not registering the admin
# at all at import time
enable_permissions = settings.FILER_ENABLE_PERMISSIONS and \
request.user.has_perm('filer.add_folderpermission')
return {
'add': enable_permissions,
'change': enable_permissions,
'delete': enable_permissions,
}
|
the-stack_0_21866 | """
Display uptime for workers in running Storm topologies.
"""
from pkg_resources import parse_version
from .common import add_config, add_environment
from ..util import get_ui_json, get_ui_jsons, print_stats_table, storm_lib_version
def subparser_hook(subparsers):
""" Hook to add subparser for this command. """
subparser = subparsers.add_parser(
"worker_uptime", description=__doc__, help=main.__doc__
)
subparser.set_defaults(func=main)
add_config(subparser)
add_environment(subparser)
def display_worker_uptime(env_name, config_file=None):
topology_summary_path = "/api/v1/topology/summary"
topology_detail_path = "/api/v1/topology/{topology}"
component_path = "/api/v1/topology/{topology}/component/{component}"
topo_summary_json = get_ui_json(
env_name, topology_summary_path, config_file=config_file
)
topology_ids = [x["id"] for x in topo_summary_json["topologies"]]
topology_components = dict()
worker_stats = []
topology_detail_jsons = get_ui_jsons(
env_name,
(topology_detail_path.format(topology=topology) for topology in topology_ids),
config_file=config_file,
)
for topology in topology_ids:
topology_detail_json = topology_detail_jsons[
topology_detail_path.format(topology=topology)
]
spouts = [x["spoutId"] for x in topology_detail_json["spouts"]]
bolts = [x["boltId"] for x in topology_detail_json["bolts"]]
topology_components[topology] = spouts + bolts
comp_details = get_ui_jsons(
env_name,
(
component_path.format(topology=topology, component=comp)
for topology, comp_list in topology_components.items()
for comp in comp_list
),
config_file=config_file,
)
for comp_detail in comp_details.values():
worker_stats += [
(worker["host"], worker["id"], worker["uptime"], worker["workerLogLink"])
for worker in comp_detail["executorStats"]
]
worker_stats = sorted(set(worker_stats))
print_stats_table(
"Worker Stats",
worker_stats,
["Host", "Worker ID", "Uptime", "Log URL"],
custom_alignment={"Uptime": "r"},
)
def main(args):
""" Display uptime for Storm workers. """
storm_version = storm_lib_version()
if storm_version >= parse_version("0.9.2-incubating"):
display_worker_uptime(args.environment, config_file=args.config)
else:
print(f"ERROR: Storm {storm_version} does not support this command.")
|
the-stack_0_21868 | import os
import asyncio
import synapse.exc as s_exc
import synapse.lib.coro as s_coro
import synapse.lib.lmdbslab as s_lmdbslab
import synapse.lib.slabseqn as s_slabseqn
import synapse.tests.utils as s_t_utils
class SlabSeqn(s_t_utils.SynTest):
def chk_size(self, seqn):
self.eq(seqn.stat()['entries'], seqn.size)
async def test_slab_seqn(self):
with self.getTestDir() as dirn:
path = os.path.join(dirn, 'test.lmdb')
slab = await s_lmdbslab.Slab.anit(path, map_size=1000000)
seqn = s_slabseqn.SlabSeqn(slab, 'seqn:test')
self.chk_size(seqn)
self.eq(seqn.nextindx(), 0)
items = ('foo', 10, 20)
seqn.save(items)
retn = tuple(seqn.iter(0))
self.eq(retn, ((0, 'foo'), (1, 10), (2, 20)))
self.chk_size(seqn)
self.raises(s_exc.NotMsgpackSafe, seqn.save, ({'set'},))
retn = tuple(seqn.iter(0))
self.eq(retn, ((0, 'foo'), (1, 10), (2, 20)))
self.eq(seqn.nextindx(), 3)
await slab.fini()
# Reopen the seqn and continue where we left off
slab = await s_lmdbslab.Slab.anit(path, map_size=1000000)
seqn = s_slabseqn.SlabSeqn(slab, 'seqn:test')
self.eq(seqn.index(), 3)
self.chk_size(seqn)
self.eq(seqn.nextindx(), 3)
seqn.save(items)
retn = tuple(seqn.iter(0))
self.eq(retn, ((0, 'foo'), (1, 10), (2, 20),
(3, 'foo'), (4, 10), (5, 20)))
self.eq(seqn.nextindx(), 6)
# We can also start in the middle of the sequence
retn = tuple(seqn.iter(4))
self.eq(retn, ((4, 10), (5, 20)))
# iterating past the end yields nothing
retn = tuple(seqn.iter(100))
self.eq(retn, ())
evnt = seqn.getOffsetEvent(4)
self.true(evnt.is_set())
evnt1 = seqn.getOffsetEvent(8)
evnt2 = seqn.getOffsetEvent(9)
evnt3 = seqn.getOffsetEvent(8)
seqn.save(items)
retn = tuple(seqn.iter(0))
self.len(9, retn)
self.chk_size(seqn)
self.eq('foo', seqn.getByIndxByts(b'\x00' * 8))
self.true(evnt1.is_set())
self.true(await seqn.waitForOffset(8, timeout=0.5))
self.false(evnt2.is_set())
self.false(await seqn.waitForOffset(9, timeout=0.1))
self.true(evnt3.is_set())
state = None
started = asyncio.Event()
async def taskloop():
nonlocal state
state = 'started'
started.set()
state = await seqn.waitForOffset(9, timeout=5)
task = asyncio.get_running_loop().create_task(taskloop())
self.true(await s_coro.event_wait(started, 2))
self.eq(state, 'started')
seqn.add('bar')
self.true(evnt2.is_set())
self.true(state)
await task
self.eq((0, 'foo'), seqn.pop(0))
self.none(seqn.pop(0))
self.chk_size(seqn)
async def getter():
retn = []
async for item in seqn.gets(8):
if item[1] is None:
return retn
retn.append(item)
return retn
task = slab.schedCoro(getter())
await asyncio.sleep(0)
seqn.add(None)
self.eq(((8, 20), (9, 'bar')), await asyncio.wait_for(task, timeout=3))
await seqn.cull(8)
self.chk_size(seqn)
self.eq(((9, 'bar'), (10, None)), [x async for x in seqn.gets(8, wait=False)])
# overwrite existing
seqn.add('baz', indx=9)
self.chk_size(seqn)
# no oldv for smaller indx
seqn.add('bam', indx=8)
self.chk_size(seqn)
# append indx
seqn.add('faz', indx=15)
self.chk_size(seqn)
await seqn.cull(14)
self.chk_size(seqn)
seqn.trim(0)
self.chk_size(seqn)
await slab.fini()
|
the-stack_0_21869 | import sys
if sys.version_info < (3, 8): # pragma: no cover (<PY38)
import importlib_metadata
else: # pragma: no cover (PY38+)
import importlib.metadata as importlib_metadata
CONFIG_FILE = '.pre-commit-config.yaml'
MANIFEST_FILE = '.pre-commit-hooks.yaml'
# Bump when installation changes in a backwards / forwards incompatible way
INSTALLED_STATE_VERSION = '1'
# Bump when modifying `empty_template`
LOCAL_REPO_VERSION = '1'
VERSION = importlib_metadata.version('pre_commit')
# `manual` is not invoked by any installed git hook. See #719
STAGES = (
'commit', 'merge-commit', 'prepare-commit-msg', 'commit-msg',
'post-commit', 'manual', 'post-checkout', 'push', 'post-merge',
'post-rewrite',
)
DEFAULT = 'default'
|
the-stack_0_21872 | #! /usr/bin/env python
"""
Usage:
alex FILE_NAME
Arguments:
FILE input file
"""
from __future__ import print_function
from docopt import docopt
from subprocess import Popen, PIPE, STDOUT, call
import re
import time
time_log = []
def timer(test_code):
def timed(*args, **kwargs):
start_time = time.time()
result = test_code(*args, **kwargs)
time_log.append(round(time.time() - start_time, 3))
return result
return timed
def _collect_input(content):
m = re.findall('"""I\n(.*?)"""', content, re.DOTALL)
if len(m):
return m
return None
def _collect_output(content):
m = re.findall('"""O\n(.*?)"""', content, re.DOTALL)
if len(m):
return m
return None
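# Illustrative test-file layout (hypothetical FILE_NAME contents; nothing here
# is taken from a real project). Each """I block is piped to the program's
# stdin and each """O block is the expected stdout picked up by the regexes
# above:
#
#     """I
#     3 4
#     """
#     """O
#     7
#     """
#     a, b = map(int, input().split())
#     print(a + b)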
def _test_python_input_output(filename, inputs, outputs):
if len(inputs) != len(outputs):
raise ValueError('inputs and outputs should be of same length')
results = _test_python_input(filename, inputs)
output = []
for index, each in enumerate(results):
output.append(outputs[index] == each)
return output, results, outputs
@timer
def do_it(filename, one):
p = Popen(['python', filename], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
grep_stdout = p.communicate(input=one.encode())[0]
# print(grep_stdout.decode())
return grep_stdout.decode()
def _test_python_input(filename, inputs):
results = []
for each_input in inputs:
results.append(do_it(filename, each_input))
return results
@timer
def _test_python_normal(filename):
p = Popen(['python', filename], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
grep_stdout = p.communicate()[0]
print(grep_stdout.decode())
def _run_python_tests(arguments):
fp = open(arguments['FILE_NAME'], 'r')
content = fp.read()
inputs = _collect_input(content)
outputs = _collect_output(content)
# print inputs, outputs
if inputs or outputs:
if outputs and inputs:
return _test_python_input_output(arguments['FILE_NAME'], inputs, outputs)
if inputs:
return _test_python_input(arguments['FILE_NAME'], inputs)
else:
print('No test cases were provided so running as is')
print()
print('YOUR OUTPUT')
print('===========')
_test_python_normal(arguments['FILE_NAME'])
def _run_tests(arguments):
return _run_python_tests(arguments)
def status(boolean):
if boolean:
return 'PASS'
return 'FAIL'
def pretty_print(to_print, times):
if to_print is None:
return ''
    if len(to_print) == 3:
bool_res, results, expected = to_print
print("YOUR OUTPUT")
print('===========')
for each in results:
print(each)
print("EXPECTED OUTPUT")
print('===============')
for each in expected:
print(each)
        print("PASS/FAIL (of %d test cases)" % len(results))
print('=========')
for index, each in enumerate(bool_res):
print('TESTCASE %d'%(index+1), status(each), times[index], 'seconds')
    if len(to_print) == 1:
print("YOUR OUTPUT")
print('===========')
print(to_print[0])
def main():
arguments = docopt(__doc__)
    if len(arguments) != 1:
raise ValueError('Expected 1 argument, %d given'%len(arguments))
print('Alex is working on ', arguments['FILE_NAME'])
print()
pretty_print(_run_tests(arguments), time_log)
if __name__ == '__main__':
main()
|
the-stack_0_21874 | # coding: utf-8
# In[3]:
import json
import os
import os.path
import boto3
import decimal
from pandas.io.json import json_normalize
import constants as cn
# In[4]:
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
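# Illustrative use (not called in this script): json.dumps(item, cls=DecimalEncoder)
# converts DynamoDB Decimal values back to plain ints/floats during serialization.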
# Scan call to DynamoDB
def scanDynamo(LastEvaluatedKey=None):
# MODE = 'walking'
# MODE = 'transit2'
# MODE = 'driving2'
MODE = 'bicycling'
# Get AWS service resource.
dynamodb = boto3.resource('dynamodb', region_name='us-east-1',
aws_access_key_id = os.environ['aws_access_key_id'],
aws_secret_access_key = os.environ['aws_secret_access_key'])
table = dynamodb.Table('seamo-' + MODE)
if (LastEvaluatedKey is None):
response = table.scan()
else:
response = table.scan(
ExclusiveStartKey = LastEvaluatedKey
)
print (response['ScannedCount'])
#print (response)
df_response = json_normalize(response,'Items')
# save data to file
    # save data to file
df_response.to_csv(os.path.join(cn.DYNAMODB_OUT_DIR, 'dynamo_out_' + MODE + '.csv'), mode='a', header=False, index=False, encoding="utf-8")
else:
df_response.to_csv(os.path.join(cn.DYNAMODB_OUT_DIR, 'dynamo_out_' + MODE + '.csv'), mode='w', header=True, index=False, encoding="utf-8")
# Call next page if available
if 'LastEvaluatedKey' in response:
scanDynamo(response['LastEvaluatedKey'])
else:
return
# In[5]:
scanDynamo() |
the-stack_0_21875 |
# coding: utf-8
# In[1]:
import marvin
from marvin.tools.maps import Maps
# Returns nested dictionary of the map, inverse variances, and mask of each relevent emission line
def getDataMap(plate,ifu):# plate number and ifu number
plateifu = plate + '-' + ifu
cube = marvin.tools.Cube(plateifu)
maps = Maps(plateifu)
HaF_map = maps["emline_gflux_ha_6564"]
HbF_map = maps["emline_gflux_hb_4862"]
OII_map = maps["emline_gflux_oii_3727"]
OIII_map = maps["emline_gflux_oiii_5008"]
NII_map = maps["emline_gflux_nii_6585"]
HaF = HaF_map,HaF_map.ivar,HaF_map.mask
HbF = HbF_map,HbF_map.ivar,HbF_map.mask
OII = OII_map,OII_map.ivar,OII_map.mask
OIII = OIII_map,OIII_map.ivar,OIII_map.mask
NII = NII_map,NII_map.ivar,NII_map.mask
HaF = dict(zip(['map','ivar','mask'],HaF))
HbF = dict(zip(['map','ivar','mask'],HbF))
OII = dict(zip(['map','ivar','mask'],OII))
OIII = dict(zip(['map','ivar','mask'],OIII))
NII = dict(zip(['map','ivar','mask'],NII))
# return HaF,HbF,OII,OIII,NII
return{
'HaF': HaF,
'HbF': HbF,
'OII': OII,
'OIII': OIII,
'NII': NII
}
# Example of accessing data
getDataMap('8485','1901')['HaF']['mask'][17]
|
the-stack_0_21876 | '''Testing numerical differentiation
Still some problems, with API (args tuple versus *args)
finite difference Hessian has some problems that I did not look at yet
Should Hessian also work per observation, if fun returns 2d
'''
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
import statsmodels.api as sm
from statsmodels.tools import numdiff
from statsmodels.tools.numdiff import (approx_fprime, approx_fprime_cs,
approx_hess_cs)
DEC3 = 3
DEC4 = 4
DEC5 = 5
DEC6 = 6
DEC8 = 8
DEC13 = 13
DEC14 = 14
def maxabs(x,y):
return np.abs(x-y).max()
def fun(beta, x):
return np.dot(x, beta).sum(0)
def fun1(beta, y, x):
#print(beta.shape, x.shape)
xb = np.dot(x, beta)
return (y-xb)**2 #(xb-xb.mean(0))**2
def fun2(beta, y, x):
#print(beta.shape, x.shape)
return fun1(beta, y, x).sum(0)
#ravel() added because of MNLogit 2d params
class CheckGradLoglikeMixin(object):
def test_score(self):
for test_params in self.params:
sc = self.mod.score(test_params)
scfd = numdiff.approx_fprime(test_params.ravel(),
self.mod.loglike)
assert_almost_equal(sc, scfd, decimal=1)
sccs = numdiff.approx_fprime_cs(test_params.ravel(),
self.mod.loglike)
assert_almost_equal(sc, sccs, decimal=11)
def test_hess(self):
for test_params in self.params:
he = self.mod.hessian(test_params)
hefd = numdiff.approx_fprime_cs(test_params, self.mod.score)
assert_almost_equal(he, hefd, decimal=DEC8)
#NOTE: notice the accuracy below
assert_almost_equal(he, hefd, decimal=7)
hefd = numdiff.approx_fprime(test_params, self.mod.score,
centered=True)
assert_allclose(he, hefd, rtol=1e-9)
hefd = numdiff.approx_fprime(test_params, self.mod.score,
centered=False)
assert_almost_equal(he, hefd, decimal=4)
hescs = numdiff.approx_fprime_cs(test_params.ravel(),
self.mod.score)
assert_allclose(he, hescs, rtol=1e-13)
hecs = numdiff.approx_hess_cs(test_params.ravel(),
self.mod.loglike)
assert_allclose(he, hecs, rtol=1e-9)
#NOTE: Look at the lack of precision - default epsilon not always
#best
grad = self.mod.score(test_params)
hecs, gradcs = numdiff.approx_hess1(test_params, self.mod.loglike,
1e-6, return_grad=True)
assert_almost_equal(he, hecs, decimal=1)
assert_almost_equal(grad, gradcs, decimal=1)
hecs, gradcs = numdiff.approx_hess2(test_params, self.mod.loglike,
1e-4, return_grad=True)
assert_almost_equal(he, hecs, decimal=3)
assert_almost_equal(grad, gradcs, decimal=1)
hecs = numdiff.approx_hess3(test_params, self.mod.loglike, 1e-5)
assert_almost_equal(he, hecs, decimal=4)
class TestGradMNLogit(CheckGradLoglikeMixin):
@classmethod
def setup_class(cls):
#from .results.results_discrete import Anes
data = sm.datasets.anes96.load(as_pandas=False)
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
cls.mod = sm.MNLogit(data.endog, exog)
#def loglikeflat(cls, params):
#reshapes flattened params
# return cls.loglike(params.reshape(6,6))
#cls.mod.loglike = loglikeflat #need instance method
#cls.params = [np.ones((6,6)).ravel()]
res = cls.mod.fit(disp=0)
cls.params = [res.params.ravel('F')]
def test_hess(self):
#NOTE: I had to overwrite this to lessen the tolerance
for test_params in self.params:
he = self.mod.hessian(test_params)
hefd = numdiff.approx_fprime_cs(test_params, self.mod.score)
assert_almost_equal(he, hefd, decimal=DEC8)
#NOTE: notice the accuracy below and the epsilon changes
# this does not work well for score -> hessian with non-cs step
# it's a little better around the optimum
assert_almost_equal(he, hefd, decimal=7)
hefd = numdiff.approx_fprime(test_params, self.mod.score,
centered=True)
assert_almost_equal(he, hefd, decimal=4)
hefd = numdiff.approx_fprime(test_params, self.mod.score, 1e-9,
centered=False)
assert_almost_equal(he, hefd, decimal=2)
hescs = numdiff.approx_fprime_cs(test_params, self.mod.score)
assert_almost_equal(he, hescs, decimal=DEC8)
hecs = numdiff.approx_hess_cs(test_params, self.mod.loglike)
assert_almost_equal(he, hecs, decimal=5)
#NOTE: these just do not work well
#hecs = numdiff.approx_hess1(test_params, self.mod.loglike, 1e-3)
#assert_almost_equal(he, hecs, decimal=1)
#hecs = numdiff.approx_hess2(test_params, self.mod.loglike, 1e-4)
#assert_almost_equal(he, hecs, decimal=0)
hecs = numdiff.approx_hess3(test_params, self.mod.loglike, 1e-4)
assert_almost_equal(he, hecs, decimal=0)
class TestGradLogit(CheckGradLoglikeMixin):
@classmethod
def setup_class(cls):
data = sm.datasets.spector.load(as_pandas=False)
data.exog = sm.add_constant(data.exog, prepend=False)
#mod = sm.Probit(data.endog, data.exog)
cls.mod = sm.Logit(data.endog, data.exog)
#res = mod.fit(method="newton")
cls.params = [np.array([1,0.25,1.4,-7])]
##loglike = mod.loglike
##score = mod.score
##hess = mod.hessian
class CheckDerivativeMixin(object):
@classmethod
def setup_class(cls):
nobs = 200
#x = np.arange(nobs*3).reshape(nobs,-1)
np.random.seed(187678)
x = np.random.randn(nobs,3)
xk = np.array([1,2,3])
xk = np.array([1.,1.,1.])
#xk = np.zeros(3)
beta = xk
y = np.dot(x, beta) + 0.1*np.random.randn(nobs)
xkols = np.dot(np.linalg.pinv(x),y)
cls.x = x
cls.y = y
cls.params = [np.array([1.,1.,1.]), xkols]
cls.init()
@classmethod
def init(cls):
pass
def test_grad_fun1_fd(self):
for test_params in self.params:
#gtrue = self.x.sum(0)
gtrue = self.gradtrue(test_params)
fun = self.fun()
epsilon = 1e-6
gfd = numdiff.approx_fprime(test_params, fun, epsilon=epsilon,
args=self.args)
gfd += numdiff.approx_fprime(test_params, fun, epsilon=-epsilon,
args=self.args)
gfd /= 2.
assert_almost_equal(gtrue, gfd, decimal=DEC6)
def test_grad_fun1_fdc(self):
for test_params in self.params:
#gtrue = self.x.sum(0)
gtrue = self.gradtrue(test_params)
fun = self.fun()
# default epsilon of 1e-6 is not precise enough here
gfd = numdiff.approx_fprime(test_params, fun, epsilon=1e-8,
args=self.args, centered=True)
assert_almost_equal(gtrue, gfd, decimal=DEC5)
def test_grad_fun1_cs(self):
for test_params in self.params:
#gtrue = self.x.sum(0)
gtrue = self.gradtrue(test_params)
fun = self.fun()
gcs = numdiff.approx_fprime_cs(test_params, fun, args=self.args)
assert_almost_equal(gtrue, gcs, decimal=DEC13)
def test_hess_fun1_fd(self):
for test_params in self.params:
#hetrue = 0
hetrue = self.hesstrue(test_params)
if hetrue is not None: #Hessian does not work for 2d return of fun
fun = self.fun()
#default works, epsilon 1e-6 or 1e-8 is not precise enough
hefd = numdiff.approx_hess1(test_params, fun, #epsilon=1e-8,
# TODO: should be kwds
args=self.args)
assert_almost_equal(hetrue, hefd, decimal=DEC3)
#TODO: I reduced precision to DEC3 from DEC4 because of
# TestDerivativeFun
hefd = numdiff.approx_hess2(test_params, fun, #epsilon=1e-8,
# TODO: should be kwds
args=self.args)
assert_almost_equal(hetrue, hefd, decimal=DEC3)
hefd = numdiff.approx_hess3(test_params, fun, #epsilon=1e-8,
# TODO: should be kwds
args=self.args)
assert_almost_equal(hetrue, hefd, decimal=DEC3)
def test_hess_fun1_cs(self):
for test_params in self.params:
#hetrue = 0
hetrue = self.hesstrue(test_params)
if hetrue is not None: #Hessian does not work for 2d return of fun
fun = self.fun()
hecs = numdiff.approx_hess_cs(test_params, fun, args=self.args)
assert_almost_equal(hetrue, hecs, decimal=DEC6)
class TestDerivativeFun(CheckDerivativeMixin):
@classmethod
def setup_class(cls):
super(TestDerivativeFun,cls).setup_class()
xkols = np.dot(np.linalg.pinv(cls.x), cls.y)
cls.params = [np.array([1.,1.,1.]), xkols]
cls.args = (cls.x,)
def fun(self):
return fun
def gradtrue(self, params):
return self.x.sum(0)
def hesstrue(self, params):
return np.zeros((3,3)) #make it (3,3), because test fails with scalar 0
#why is precision only DEC3
class TestDerivativeFun2(CheckDerivativeMixin):
@classmethod
def setup_class(cls):
super(TestDerivativeFun2,cls).setup_class()
xkols = np.dot(np.linalg.pinv(cls.x), cls.y)
cls.params = [np.array([1.,1.,1.]), xkols]
cls.args = (cls.y, cls.x)
def fun(self):
return fun2
def gradtrue(self, params):
y, x = self.y, self.x
return (-x*2*(y-np.dot(x, params))[:,None]).sum(0)
#2*(y-np.dot(x, params)).sum(0)
def hesstrue(self, params):
x = self.x
return 2*np.dot(x.T, x)
class TestDerivativeFun1(CheckDerivativeMixin):
@classmethod
def setup_class(cls):
super(TestDerivativeFun1, cls).setup_class()
xkols = np.dot(np.linalg.pinv(cls.x), cls.y)
cls.params = [np.array([1.,1.,1.]), xkols]
cls.args = (cls.y, cls.x)
def fun(self):
return fun1
def gradtrue(self, params):
y, x = self.y, self.x
return (-x*2*(y-np.dot(x, params))[:,None])
def hesstrue(self, params):
        return None  # 2d (per-observation) Hessian is not supported; the lines below are intentionally unreachable
y, x = self.y, self.x
return (-x*2*(y-np.dot(x, params))[:,None]) #TODO: check shape
def test_dtypes():
def f(x):
return 2*x
desired = np.array([[2, 0],
[0, 2]])
assert_allclose(approx_fprime(np.array([1, 2]), f), desired)
assert_allclose(approx_fprime(np.array([1., 2.]), f), desired)
assert_allclose(approx_fprime(np.array([1.+0j, 2.+0j]), f), desired)
if __name__ == '__main__': # FIXME: turn into tests or move/remove
epsilon = 1e-6
nobs = 200
x = np.arange(nobs*3).reshape(nobs,-1)
x = np.random.randn(nobs,3)
xk = np.array([1,2,3])
xk = np.array([1.,1.,1.])
#xk = np.zeros(3)
beta = xk
y = np.dot(x, beta) + 0.1*np.random.randn(nobs)
xkols = np.dot(np.linalg.pinv(x),y)
print(approx_fprime((1,2,3),fun,epsilon,x))
gradtrue = x.sum(0)
print(x.sum(0))
gradcs = approx_fprime_cs((1,2,3), fun, (x,), h=1.0e-20)
print(gradcs, maxabs(gradcs, gradtrue))
print(approx_hess_cs((1,2,3), fun, (x,), h=1.0e-20)) #this is correctly zero
print(approx_hess_cs((1,2,3), fun2, (y,x), h=1.0e-20)-2*np.dot(x.T, x))
print(numdiff.approx_hess(xk,fun2,1e-3, (y,x))[0] - 2*np.dot(x.T, x))
gt = (-x*2*(y-np.dot(x, [1,2,3]))[:,None])
g = approx_fprime_cs((1,2,3), fun1, (y,x), h=1.0e-20)#.T #this should not be transposed
gd = numdiff.approx_fprime((1,2,3),fun1,epsilon,(y,x))
print(maxabs(g, gt))
print(maxabs(gd, gt))
data = sm.datasets.spector.load(as_pandas=False)
data.exog = sm.add_constant(data.exog, prepend=False)
#mod = sm.Probit(data.endog, data.exog)
mod = sm.Logit(data.endog, data.exog)
#res = mod.fit(method="newton")
test_params = [1,0.25,1.4,-7]
loglike = mod.loglike
score = mod.score
hess = mod.hessian
#cs does not work for Probit because special.ndtr does not support complex
#maybe calculating ndtr for real and imag parts separately, if we need it
#and if it still works in this case
print('sm', score(test_params))
print('fd', numdiff.approx_fprime(test_params,loglike,epsilon))
print('cs', numdiff.approx_fprime_cs(test_params,loglike))
print('sm', hess(test_params))
print('fd', numdiff.approx_fprime(test_params,score,epsilon))
print('cs', numdiff.approx_fprime_cs(test_params, score))
hesscs = numdiff.approx_hess_cs(test_params, loglike)
print('cs', hesscs)
print(maxabs(hess(test_params), hesscs))
data = sm.datasets.anes96.load(as_pandas=False)
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
datap = sm.datasets.randhie.load(as_pandas=False)
nobs = len(datap.endog)
exogp = sm.add_constant(datap.exog.view(float).reshape(nobs,-1),
prepend=False)
modp = sm.Poisson(datap.endog, exogp)
resp = modp.fit(method='newton', disp=0)
|
the-stack_0_21877 | from flask import g, current_app
from opensearchpy import OpenSearch
def get_opensearch():
if 'opensearch' not in g:
host = 'localhost'
port = 9200
        # Default credentials of a local development cluster; replace for any non-local deployment.
auth = ('admin', 'admin')
g.opensearch = OpenSearch(
hosts=[{'host': host, 'port': port}],
http_compress=True,
http_auth=auth,
use_ssl=True,
verify_certs=False,
ssl_assert_hostname=False,
ssl_show_warn=False,
)
return g.opensearch
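# Hypothetical usage sketch (not part of the original module): a Flask view can
# reuse the client cached on `g` for the duration of the request, e.g.
#
#     @app.route("/search")
#     def search():
#         client = get_opensearch()
#         return client.search(index="some_index", body={"query": {"match_all": {}}})
#
# The index name above is a placeholder, not something defined by this module.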
|
the-stack_0_21878 | from doc_utils import DocUtils
from relevanceai.base import _Base
from relevanceai.api.client import BatchAPIClient
from typing import List, Dict
class Utils(BatchAPIClient, _Base, DocUtils):
def __init__(self, project, api_key):
self.project = project
self.api_key = api_key
super().__init__(project, api_key)
def _is_valid_vector_name(self, dataset_id, vector_name: str) -> bool:
"""
Check vector field name is valid
"""
vector_fields = self.get_vector_fields(dataset_id)
schema = self.datasets.schema(dataset_id)
if vector_name in schema.keys():
if vector_name in vector_fields:
return True
else:
raise ValueError(f"{vector_name} is not a valid vector name")
else:
raise ValueError(f"{vector_name} is not in the {dataset_id} schema")
def _is_valid_label_name(self, dataset_id, label_name: str) -> bool:
"""
Check vector label name is valid. Checks that it is either numeric or text
"""
schema = self.datasets.schema(dataset_id)
if label_name == "_id":
return True
if label_name in list(schema.keys()):
if schema[label_name] in ["numeric", "text"]:
return True
else:
raise ValueError(f"{label_name} is not a valid label name")
else:
raise ValueError(f"{label_name} is not in the {dataset_id} schema")
def _remove_empty_vector_fields(self, docs, vector_field: str) -> List[Dict]:
"""
Remove documents with empty vector fields
"""
return [d for d in docs if d.get(vector_field)]
|
the-stack_0_21879 | #!/usr/bin/python3
from argparse import ArgumentParser
from inspect import isclass
from scenexec import execute
import json
import os
# Parse arguments
def get_args():
p = ArgumentParser(description='CLOSURE Scenario Configuration')
p.add_argument('-f', '--file', required=True, type=str, help='Input config file')
p.add_argument('-l', '--layout', required=True, type=str, help='Input layout file')
p.add_argument('-s', '--settings', required=True, type=str, help='Emulator settings file')
p.add_argument('-o', '--outfile', required=True, type=str, help='Output IMN file')
return p.parse_args()
# Base class for object containment hierarchy built from JSON
class base:
def __init__(self,**kwargs):
for k in kwargs: setattr(self,k,kwargs[k])
def render(self,depth,style='basic',layout=None, settings=None):
if style != 'basic': raise Exception('Unsupported style: ' + style)
return ' ' * depth + self.__class__.__name__ + '\n'
def field_render(depth,fldval,fldnam,style='basic',layout=None, settings=None):
if style != 'basic': raise Exception('Unsupported style: ' + style)
return ' ' * depth + fldnam + ':' + str(fldval) + '\n'
# Return non-function, non-internal fields of scenario class instance
def fields(v):
return [a for a in dir(v) if not callable(getattr(v,a)) and not a.startswith("__")]
# Name of valid scenario class
def valid_class_name(n):
g = globals()
return True if n in g and isclass(g[n]) and issubclass(g[n], base) else False
# Instance of valid scenario class
def valid_class_instance(v):
return True if isclass(type(v)) and issubclass(type(v), base) else False
# Compose scenario from dict
def compose(n,d):
if not valid_class_name(n): raise Exception('Unsupported class: ' + n)
def subcomp(k,v):
if isinstance(v,list): return [compose(k,i) for i in v]
elif isinstance(v,dict): return compose(k,v)
else: return v
return globals()[n](**{k:subcomp(k,v) for k,v in d.items()})
# Generic traversal using depth-first search
def traverse(v,name,depth,style,layout=None):
ret = ''
if valid_class_instance(v):
ret += v.render(depth,style=style,layout=layout,settings=None)
for i in fields(v):
x = getattr(v,i)
if isinstance(x,list):
ret += ''.join([traverse(j,i,depth+1,style,layout) for j in x])
else:
ret += traverse(x,i,depth+1,style,layout)
else:
ret += base.field_render(depth,v,name,style,layout=layout,settings=None)
return ret
# Generic rendering of all children that are class instances or list thereof
def render_children(v,depth,style,layout,settings,exclude=[]):
ret = ''
for i in fields(v):
if i in exclude: continue
x = getattr(v,i)
if isinstance(x,list):
ret += ''.join([j.render(depth+1,style,layout,settings) for j in x])
elif valid_class_instance(x):
ret += x.render(depth+1,style,layout,settings)
else:
pass # note, we ignore all non class fields in generic render_children
return ret
##############################################
# IMN scenario generator specifc code below
class IDGen():
def __init__(self):
self.nid = 0
self.lid = 0
self.cid = 0
self.aid = 0
self.nm2id = {}
def get_id(self,nm,typ):
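        # Map a human-readable node/link/canvas/annotation name to a short IMN
        # identifier (n1, l2, c1, a1, ...), caching the mapping in nm2id so the
        # same name always resolves to the same identifier.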
if nm in self.nm2id:
return self.nm2id[nm]
else:
if typ in ['NODE', 'xdhost', 'inthost', 'hub', 'xdgateway']:
self.nid += 1
mnm = 'n'+str(self.nid)
elif typ in ['link', 'left', 'right']:
self.lid += 1
mnm = 'l'+str(self.lid)
elif typ in ['canvas']:
self.cid += 1
mnm = 'c'+str(self.cid)
elif typ in ['annotation']:
self.aid += 1
mnm = 'a'+str(self.aid)
            self.nm2id[mnm if nm is None else nm] = mnm
return mnm
# Extend base with a class member and class method for ID generation/mapping
class basewid(base):
__idgen__ = IDGen()
def get_id(nm,typ): return basewid.__idgen__.get_id(nm,typ)
def render(self,depth,style='imn',layout=None,settings=None):
return render_children(self,depth,style,layout,settings) if style == 'imn' else super().render(depth,style,layout,settings)
#####################################################################################################
# Scenario classes derived from basewid
class scenario(basewid):
def get_hostnames(self):
names = []
for e in self.enclave:
for h in e.xdhost + e.inthost:
names.append(h.hostname)
for h in self.xdgateway:
names.append(h.hostname)
return names
def get_xdlinks_for_xdgateway(self, xdg):
links = []
xd_peers = []
for p in xdg.ifpeer:
xd_peers.append(p.peername)
for xdlink in self.xdlink:
if xdlink.left.f in xd_peers and xdlink.right.t in xd_peers:
links.append(xdlink)
return links
def render_addons(self, depth, style, layout, settings):
#instantiation hook
ret = 'hook 3:instantiation_hook.sh {\n'
for n in self.get_hostnames():
ret += f' mkdir $SESSION_DIR/{n}.conf/scripts\n'
ret += f' mkdir $SESSION_DIR/{n}.conf/tools\n'
ret += f' mkdir $SESSION_DIR/{n}.conf/apps\n'
ret += f" cp -r {settings.emuroot}/scripts/* $SESSION_DIR/{n}.conf/scripts\n"
ret += f' cp -r {settings.emuroot}/tools/* $SESSION_DIR/{n}.conf/tools\n'
ret += f' cp -r {settings.emuroot}/.apps/*{n}* $SESSION_DIR/{n}.conf/apps &> /dev/null\n'
ret += '}\n'
return ret if style == 'imn' else ""
def render(self, depth, style='imn', layout=None, settings=None):
return super().render(depth,style,layout,settings) + self.render_addons(depth,style,layout,settings)
class enclave(basewid): pass # use basewid rendering
class xdhost(basewid):
def render(self,depth,style='imn',layout=None,settings=None):
if style == 'imn':
ret = ""
nid = basewid.__idgen__.get_id(self.hostname, type(self).__name__)
nodelayout = layout.get_node_layout(self.hostname)
ret+=f'''node {nid} {{
type router
model host
network-config {{
\thostname {self.hostname}
\t!
'''
ret += self.nwconf.render(depth, style, layout,settings)
ret += ' }\n'
ret += nodelayout.render(depth,style,layout,settings)
ret += self.swconf.render(depth,style,layout,settings)
for p in self.ifpeer:
ret += p.render(depth, style, layout, settings)
ret += gen_custom_config(settings)
ret += '}\n'
cmdup= "cmdup=("
for c in gen_cmdup(self, settings):
cmdup += f"'{c}', "
cmdup += ')'
ret = ret.replace('cmdup=XXX', cmdup)
return ret
else:
return super().render(depth,style,layout,settings)
class inthost(basewid):
def render(self,depth,style='imn',layout=None,settings=None):
if style == 'imn':
nid = basewid.__idgen__.get_id(self.hostname, type(self).__name__)
nodelayout = layout.get_node_layout(self.hostname)
ret=f'''node {nid} {{
type router
model PC
network-config {{
\thostname {self.hostname}
\t!\n'''
ret += self.nwconf.render(depth, style, layout, settings)
ret += ' }\n'
ret += nodelayout.render(depth, style, layout, settings)
ret += self.swconf.render(depth, style, layout, settings)
for p in self.ifpeer:
ret += p.render(depth, style, layout, settings)
ret += gen_custom_config(settings)
ret += '}\n'
cmdup= "cmdup=("
for c in gen_cmdup(self, settings):
cmdup += f"'{c}', "
cmdup += ')'
ret = ret.replace('cmdup=XXX', cmdup)
return ret
else:
return super().render(depth, style, layout, settings)
class hub(basewid):
def render(self,depth,style='imn',layout=None,settings=None):
if style == 'imn':
nid = basewid.__idgen__.get_id(self.hostname, type(self).__name__)
nodelayout = layout.get_node_layout(self.hostname)
ret=f'''node {nid} {{
type hub
network-config {{
\thostname {self.hostname}
\t!
}}\n'''
ret += nodelayout.render(depth, style, layout, settings)
for i in self.ifpeer:
ret += i.render(depth, style, layout, settings)
ret += '}\n'
return ret
else:
return super().render(depth, style, layout, settings)
class xdgateway(basewid):
def render(self,depth, style='imn',layout=None,settings=None):
if style == 'imn':
nid = basewid.__idgen__.get_id(self.hostname, type(self).__name__)
nodelayout = layout.get_node_layout(self.hostname)
ret=f'''node {nid} {{
type router
model router
network-config {{
\thostname {self.hostname}
\t!
'''
ret += self.nwconf.render(depth, style, layout, settings)
ret += ' }\n'
ret += nodelayout.render(depth, style, layout, settings)
ret += self.swconf.render(depth, style, layout, settings)
for p in self.ifpeer:
ret += p.render(depth,style, layout, settings)
ret += gen_custom_config(settings)
ret += '}\n'
cmdup= "cmdup=("
for c in gen_cmdup(self, settings):
cmdup += f"'{c}', "
cmdup += ')'
ret = ret.replace('cmdup=XXX', cmdup)
return ret
else:
return super().render(depth,style,layout,settings)
class link(basewid):
def render(self,depth,style='imn',layout=None,settings=None):
lid = basewid.__idgen__.get_id(self.f+'<-->'+self.t, type(self).__name__)
return f'''link {lid} {{
nodes {{{basewid.__idgen__.get_id(self.f, type(self).__name__)} {basewid.__idgen__.get_id(self.t, type(self).__name__)}}}
bandwidth {self.bandwidth}
delay {self.delay}
}}\n''' if style == 'imn' else super().render(depth,style,layout,settings)
class xdlink(basewid):
def render(self,depth,style='imn',layout=None,settings=None):
if style == 'imn':
ret = self.left.render(depth,style,layout,settings)
ret += self.right.render(depth,style,layout,settings)
return ret
else:
return super().render(depth,style,layout,settings)
## TODO
class hwconf(basewid): pass
class swconf(basewid):
def render(self, depth, style='imn', layout=None,settings=None):
svcs = ' '.join(svc.s for svc in self.service)
ret = f' services {{{svcs}}}\n'
return ret if style == 'imn' else super().render(depth, style, layout, settings)
class nwconf(basewid):
def render(self, depth, style='imn', layout=None, settings=None):
if style == 'imn':
ret = ""
for i in self.interface:
ret += i.render(depth, style, layout, settings)
return ret
else:
return super().render(depth,style,layout,settings)
class interface(basewid):
def render(self, depth, style='imn', layout=None, settings=None):
return f"\tinterface {self.ifname}\n\tip address {self.addr}\n\t!\n" if style == 'imn' else super().render(depth,style,layout,settings)
class ifpeer(basewid):
def render(self, depth, style='imn', layout=None, settings=None):
return f' interface-peer {{{self.ifname} {basewid.__idgen__.get_id(self.peername, "NODE")}}}\n' if style == 'imn' else super().render(depth,style,layout,settings)
class left(basewid):
def render(self, depth, style='imn', layout=None, settings=None):
lid = basewid.__idgen__.get_id(self.f+'<-->'+self.t, type(self).__name__)
return f'''link {lid} {{
nodes {{{basewid.__idgen__.get_id(self.f, type(self).__name__)} {basewid.__idgen__.get_id(self.t, type(self).__name__)}}}
bandwidth {{{self.egress.bandwidth} {self.ingress.bandwidth}}}
delay {{{self.egress.delay} {self.ingress.delay}}}
}}\n''' if style == 'imn' else super().render(depth,style,layout,settings)
class right(basewid):
def render(self, depth, style='imn', layout=None, settings=None):
lid = basewid.__idgen__.get_id(self.f+'<-->'+self.t, type(self).__name__)
return f'''link {lid} {{
nodes {{{basewid.__idgen__.get_id(self.f, type(self).__name__)} {basewid.__idgen__.get_id(self.t, type(self).__name__)}}}
bandwidth {{{self.egress.bandwidth} {self.ingress.bandwidth}}}
delay {{{self.egress.delay} {self.ingress.delay}}}
}}\n''' if style == 'imn' else super().render(depth,style,layout,settings)
## TODO
class egress(basewid): pass
## TODO
class ingress(basewid): pass
# Layout classes
class scenlayout(basewid):
def render(self,depth,style='imn',layout=None,settings=None):
return render_children(self,depth,style,layout,settings,exclude=['nodelayout']) if style == 'imn' else super().render(depth,style,layout,settings)
def get_node_layout(self, nod):
x = [n for n in self.nodelayout if n.hostname == nod]
        if len(x) != 1: raise Exception('Error getting layout for: ' + nod)
return x[0]
def get_node_service(self, nod):
x = [n for n in self.nodeservice if n.hostname == nod]
        if len(x) != 1: raise Exception('Error getting service for: ' + nod)
return x[0]
class option(basewid): pass
class optglobal(basewid):
def render(self,depth,style='imn',layout=None,settings=None):
return f'''option global {{
interface_names {self.interface_names}
ip_addresses {self.ip_addresses}
ipv6_addresses {self.ipv6_addresses}
node_labels {self.node_labels}
link_labels {self.link_labels}
show_api {self.show_api}
background_images {self.background_images}
annotations {self.annotations}
grid {self.grid}
traffic_start {self.traffic_start}
}}\n''' if style == 'imn' else super().render(depth,style,layout,settings)
class session(basewid):
def render(self,depth,style='imn',layout=None,settings=None):
return 'option session { }\n' if style == 'imn' else super().render(depth,style,layout,settings)
class canvas(basewid):
def render(self,depth,style='imn',layout=None,settings=None):
cid = basewid.get_id(self.name,'canvas')
return 'canvas %s { name { %s } }\n' % (cid,self.name) if style == 'imn' else super().render(depth,style,layout,settings)
class annotation(basewid):
def render(self,depth,style='imn',layout=None,settings=None):
aid = basewid.get_id(None,'annotation')
return f'''annotation {aid} {{
{self.bbox.render(depth, style, layout, settings)}
type {self.type}
label {self.label}
labelcolor {self.labelcolor}
fontfamily {self.fontfamily}
fontsize {self.fontsize}
color {self.color}
width {self.width}
border {self.border}
rad {self.rad}
canvas {basewid.__idgen__.get_id(self.canvas, 'canvas')}
}}\n''' if style == 'imn' else super().render(depth,style,layout,settings)
class bbox(basewid):
def render(self, depth, style='imn', layout=None, settings=None):
return f'iconcoords {{{self.x1} {self.y1} {self.x2} {self.y2}}}' if style == 'imn' else super().render(depth,style,layout,settings)
class nodelayout(basewid):
def render(self, depth, style='imn', layout=None, settings=None):
return f' canvas {basewid.__idgen__.get_id(self.canvas, "canvas")}\n {self.iconcoords.render(depth, style, layout, settings)}\n {self.labelcoords.render(depth, style, layout, settings)}\n' if style == 'imn' else super().render(depth,style,layout,settings)
class iconcoords(basewid):
def render(self, depth, style='imn', layout=None,settings=None):
return f'iconcoords {{{self.x} {self.y}}}' if style == 'imn' else super().render(depth,style,layout,settings)
class labelcoords(basewid):
def render(self, depth, style='imn', layout=None, settings=None):
return f'labelcoords {{{self.x} {self.y}}}' if style == 'imn' else super().render(depth,style,layout,settings)
class service(basewid): pass
class settings(basewid): pass
def gen_custom_config(settings):
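    # Emits the CORE "UserDefined" service block: the shadowed directories come
    # from the settings file, and the cmdup=XXX placeholder is substituted later
    # by each node's render() using the commands from gen_cmdup().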
ret = f' custom-config {{\n\tcustom-config-id service:UserDefined\n\tcustom-command UserDefined\n\t'
dirstr = 'dirs=('
for dir in settings.shadow_directories.rstrip(';').split(';'):
dirstr += f"'{dir}', "
cmdupstr = 'cmdup=XXX'
ret += f'config {{\n\t{dirstr} )\n\t{cmdupstr}\n \t}}\n }}\n'
return ret
def gen_cmdup(x, settings):
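    # Builds the per-node startup command list: SSH and background-colour setup
    # for every node, plus interface configuration and a QEMU guest launch for
    # cross-domain hosts (xdhost).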
cmds = []
cmds.append(f'scripts/common/common-config-ssh.sh {settings.emuroot}/{settings.snapdir}')
cmds.append(f'scripts/common/common-setbgcolor.sh')
if type(x).__name__ == 'xdhost':
cmds.append(f'scripts/xdh/xdh-config-core-interfaces.sh')
cmds.append(f'scripts/xdh/xdh-start-qemu-instance.sh {x.hwconf.arch} {settings.emuroot}/{settings.snapdir}/{x.swconf.os}-{x.hwconf.arch}-{x.hostname}.qcow2 {settings.imgdir}/linux-kernel-{x.hwconf.arch}-{x.swconf.kernel} {os.environ["USER"]}')
return cmds
if __name__ == '__main__':
args = get_args()
with open(args.file, 'r') as inf1: conf = json.load(inf1)
with open(args.layout, 'r') as inf2: layo = json.load(inf2)
with open(args.settings, 'r') as inf3: sett = json.load(inf3)
scen = compose('scenario',conf)
locs = compose('scenlayout',layo)
sets = compose('settings', sett)
setattr(sets, 'emuroot', f'/home/{os.getenv("USER")}/gaps/build/src/emu')
ret = scen.render(0,'imn',locs,sets)
ret += locs.render(0,'imn',None, None)
IMNDIR=f'{sets.emuroot}/{sets.imndir}'
if not os.path.exists(IMNDIR):
os.mkdir(IMNDIR)
setattr(args, 'imnAbsPath', f'{sets.emuroot}/{sets.imndir}/{args.outfile}')
with open(args.imnAbsPath,'w') as outf: outf.write(ret)
outf.close()
execute(scen, locs, sets, args)
    #print(traverse(scen, 'scenario', 0, 'basic', locs))
#print(traverse(locs,'scenlayout', 0,'basic',locs))
#print(basewid.__idgen__.nm2id)
|
the-stack_0_21880 | from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QSizePolicy, QAbstractButton
from PyQt5.QtCore import Qt, QSize, QPropertyAnimation, pyqtProperty, QRectF
from PyQt5.QtGui import QPainter
from cgi import parse_header
from karen.shared import sendHTTPRequest
import time
#from PyQt5.Qt import QPalette
class DownloadThread(QtCore.QThread):
data_downloaded = QtCore.pyqtSignal(object)
def __init__(self, url, refreshInterval=3, authToken=None):
QtCore.QThread.__init__(self)
self.isRunning = False
self.url = url
self.refreshInterval = refreshInterval
self.authToken = authToken
def run(self):
#info = urllib2.urlopen(self.url).info()
self.isRunning = True
headers=None
if self.authToken is not None:
headers = { "Cookie": "token="+self.authToken }
while self.isRunning:
retVal, retType, retData = sendHTTPRequest(self.url, type="GET", headers=headers)
if retType.startswith("image/"):
self.data_downloaded.emit(retData)
#print("emitted data")
time.sleep(self.refreshInterval)
elif retType.startswith("multipart/x-mixed-replace"):
cType, cParam = parse_header(retType)
boundary = cParam["boundary"]
boundary = boundary.encode()
endHeaders = "\n\n".encode()
part = None
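                # The loop below reassembles MJPEG frames from the raw chunk stream:
                # bytes are accumulated in `part` until the multipart boundary is seen,
                # the part headers (everything up to the blank line) are stripped, the
                # remaining JPEG payload is emitted via data_downloaded, and the next
                # part restarts from the boundary position.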
try:
for data in retData.iter_content(chunk_size=64):
if not self.isRunning:
return
if data.find(boundary) >= 0:
p = data.find(boundary)
if part is None:
part = data[:p]
else:
part += data[:p]
if part.find(endHeaders) >= 0:
part = part[part.find(endHeaders)+len(endHeaders):]
self.data_downloaded.emit(part)
part = data[p:]
else:
if part is None:
part = data
else:
part += data
except:
pass
class VideoButton(QtWidgets.QPushButton):
def __init__(self, *args, **kwargs):
self.url = None
self.refreshInterval = 3
self.thread = None
self.pixmap = None
self.authToken = None
super().__init__(*args, **kwargs)
def setBackgroundUrl(self, url, refreshInterval=3, authToken=None):
self.url = url
self.refreshInterval = refreshInterval
self.authToken = authToken
return True
def start(self):
if self.thread is None or not self.thread.isRunning:
self.thread = DownloadThread(self.url, refreshInterval=self.refreshInterval, authToken=self.authToken)
self.thread.data_downloaded.connect(self.setBackgroundImage)
self.thread.start()
elif self.thread is not None:
self.thread.isRunning = True
return True
def stop(self):
if self.thread is not None:
self.thread.isRunning = False
def paintEvent(self, event):
if self.pixmap is not None:
painter = QPainter(self)
#painter.setBrush(self.pixmap)
#painter.begin(self)
pixmap = self.pixmap.copy()
painter.drawPixmap(0,0,self.width(),self.height(),pixmap)
else:
super().paintEvent(event)
def setBackgroundImage(self, data):
pixmap = QtGui.QPixmap()
pixmap.loadFromData(data)
pixmap = pixmap.scaled(self.width(), self.height(), QtCore.Qt.KeepAspectRatio)
self.pixmap = pixmap
self.update()
return True
class VideoLabel(QtWidgets.QLabel):
def __init__(self, *args, **kwargs):
self.url = None
self.refreshInterval = 3
self.thread = None
self.authToken = None
        super().__init__(*args, **kwargs)
def setBackgroundUrl(self, url, refreshInterval=3, authToken=None):
self.url = url
self.refreshInterval = refreshInterval
self.authToken = authToken
return True
def start(self):
if self.thread is None or not self.thread.isRunning:
self.thread = DownloadThread(self.url, refreshInterval=self.refreshInterval, authToken=self.authToken)
self.thread.data_downloaded.connect(self.setBackgroundImage)
self.thread.start()
elif self.thread is not None:
self.thread.isRunning = True
return True
def stop(self):
if self.thread is not None:
self.thread.isRunning = False
def setBackgroundImage(self, data):
pixmap = QtGui.QPixmap()
pixmap.loadFromData(data)
pixmap = pixmap.scaled(self.width(), self.height(), QtCore.Qt.KeepAspectRatio)
self.setPixmap(pixmap)
return True
class Switch(QAbstractButton):
def __init__(self, parent=None, track_radius=10, thumb_radius=8):
super().__init__(parent=parent)
self.setCheckable(True)
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self._track_radius = track_radius
self._thumb_radius = thumb_radius
self._margin = max(0, self._thumb_radius - self._track_radius)
self._base_offset = max(self._thumb_radius, self._track_radius)
self._end_offset = {
True: lambda: self.width() - self._base_offset,
False: lambda: self._base_offset,
}
self._offset = self._base_offset
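        # Geometry bookkeeping: _margin pads the widget when the thumb is larger
        # than the track, _base_offset is the resting thumb-centre coordinate, and
        # _end_offset maps the checked state to the thumb centre at either end.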
palette = self.palette()
palette.setColor(QtGui.QPalette.Highlight, QtGui.QColor(48,148,198))
palette.setColor(QtGui.QPalette.HighlightedText, Qt.white)
palette.setColor(QtGui.QPalette.Light, Qt.white)
palette.setColor(QtGui.QPalette.Dark, QtGui.QColor(159,159,159))
if self._thumb_radius > self._track_radius:
self._track_color = {
True: palette.highlight(),
False: palette.dark(),
}
self._thumb_color = {
True: palette.highlight(),
False: palette.light(),
}
self._text_color = {
True: palette.highlightedText().color(),
False: palette.dark().color(),
}
self._thumb_text = {
True: '',
False: '',
}
self._track_opacity = 0.5
else:
self._thumb_color = {
True: palette.highlightedText(),
False: palette.light(),
}
self._track_color = {
True: palette.highlight(),
False: palette.dark(),
}
self._text_color = {
True: palette.highlight().color(),
False: palette.dark().color(),
}
self._thumb_text = {
True: '✔',
False: '✕',
}
self._track_opacity = 1
@pyqtProperty(int)
def offset(self):
return self._offset
@offset.setter
def offset(self, value):
self._offset = value
self.update()
def sizeHint(self): # pylint: disable=invalid-name
return QSize(
4 * self._track_radius + 2 * self._margin,
2 * self._track_radius + 2 * self._margin,
)
def setChecked(self, checked):
super().setChecked(checked)
self.offset = self._end_offset[checked]()
def resizeEvent(self, event):
super().resizeEvent(event)
self.offset = self._end_offset[self.isChecked()]()
def paintEvent(self, event): # pylint: disable=invalid-name, unused-argument
p = QPainter(self)
p.setRenderHint(QPainter.Antialiasing, True)
p.setPen(Qt.NoPen)
track_opacity = self._track_opacity
thumb_opacity = 1.0
text_opacity = 1.0
if self.isEnabled():
track_brush = self._track_color[self.isChecked()]
thumb_brush = self._thumb_color[self.isChecked()]
text_color = self._text_color[self.isChecked()]
else:
track_opacity *= 0.8
track_brush = self.palette().shadow()
thumb_brush = self.palette().mid()
text_color = self.palette().shadow().color()
p.setBrush(track_brush)
p.setOpacity(track_opacity)
p.drawRoundedRect(
self._margin,
self._margin,
self.width() - 2 * self._margin,
self.height() - 2 * self._margin,
self._track_radius,
self._track_radius,
)
p.setBrush(thumb_brush)
p.setOpacity(thumb_opacity)
p.drawEllipse(
self.offset - self._thumb_radius,
self._base_offset - self._thumb_radius,
2 * self._thumb_radius,
2 * self._thumb_radius,
)
p.setPen(text_color)
p.setOpacity(text_opacity)
font = p.font()
font.setPixelSize(1.5 * self._thumb_radius)
p.setFont(font)
p.drawText(
QRectF(
self.offset - self._thumb_radius,
self._base_offset - self._thumb_radius,
2 * self._thumb_radius,
2 * self._thumb_radius,
),
Qt.AlignCenter,
self._thumb_text[self.isChecked()],
)
def mouseReleaseEvent(self, event): # pylint: disable=invalid-name
super().mouseReleaseEvent(event)
if event.button() == Qt.LeftButton:
anim = QPropertyAnimation(self, b'offset', self)
anim.setDuration(120)
anim.setStartValue(self.offset)
anim.setEndValue(self._end_offset[self.isChecked()]())
anim.start()
def enterEvent(self, event): # pylint: disable=invalid-name
self.setCursor(Qt.PointingHandCursor)
super().enterEvent(event)
|
the-stack_0_21881 | #!/usr/bin/env python
"""
Sith attacks (and helps debugging) Jedi.
Randomly search Python files and run Jedi on it. Exception and used
arguments are recorded to ``./record.json`` (specified by --record)::
./sith.py random /path/to/sourcecode
Redo recorded exception::
./sith.py redo
Show recorded exception::
./sith.py show
Run a specific operation
./sith.py run <operation> </path/to/source/file.py> <line> <col>
Where operation is one of complete, goto, infer, get_references or get_signatures.
Note: Line numbers start at 1; columns start at 0 (this is consistent with
many text editors, including Emacs).
Usage:
sith.py [--pdb|--ipdb|--pudb] [-d] [-n=<nr>] [-f] [--record=<file>] random [-s] [<path>]
sith.py [--pdb|--ipdb|--pudb] [-d] [-f] [--record=<file>] redo
sith.py [--pdb|--ipdb|--pudb] [-d] [-f] run <operation> <path> <line> <column>
sith.py show [--record=<file>]
sith.py -h | --help
Options:
-h --help Show this screen.
--record=<file> Exceptions are recorded in here [default: record.json].
-f, --fs-cache By default, file system cache is off for reproducibility.
-n, --maxtries=<nr> Maximum of random tries [default: 100]
-d, --debug Jedi print debugging when an error is raised.
-s Shows the path/line numbers of every completion before it starts.
--pdb Launch pdb when error is raised.
--ipdb Launch ipdb when error is raised.
--pudb Launch pudb when error is raised.
"""
from docopt import docopt # type: ignore[import]
import json
import os
import random
import sys
import traceback
import jedi
class SourceFinder(object):
_files = None
@staticmethod
def fetch(file_path):
if not os.path.isdir(file_path):
yield file_path
return
for root, dirnames, filenames in os.walk(file_path):
for name in filenames:
if name.endswith('.py'):
yield os.path.join(root, name)
@classmethod
def files(cls, file_path):
if cls._files is None:
cls._files = list(cls.fetch(file_path))
return cls._files
class TestCase(object):
def __init__(self, operation, path, line, column, traceback=None):
if operation not in self.operations:
raise ValueError("%s is not a valid operation" % operation)
# Set other attributes
self.operation = operation
self.path = path
self.line = line
self.column = column
self.traceback = traceback
@classmethod
def from_cache(cls, record):
with open(record) as f:
args = json.load(f)
return cls(*args)
# Changing this? Also update the module docstring above.
operations = ['complete', 'goto', 'infer', 'get_references', 'get_signatures']
@classmethod
def generate(cls, file_path):
operation = random.choice(cls.operations)
path = random.choice(SourceFinder.files(file_path))
with open(path) as f:
source = f.read()
lines = source.splitlines()
if not lines:
lines = ['']
line = random.randint(1, len(lines))
line_string = lines[line - 1]
line_len = len(line_string)
if line_string.endswith('\r\n'):
line_len -= 1
if line_string.endswith('\n'):
line_len -= 1
column = random.randint(0, line_len)
return cls(operation, path, line, column)
def run(self, debugger, record=None, print_result=False):
try:
with open(self.path) as f:
self.script = jedi.Script(f.read(), path=self.path)
kwargs = {}
if self.operation == 'goto':
kwargs['follow_imports'] = random.choice([False, True])
self.objects = getattr(self.script, self.operation)(self.line, self.column, **kwargs)
if print_result:
print("{path}: Line {line} column {column}".format(**self.__dict__))
self.show_location(self.line, self.column)
self.show_operation()
except Exception:
self.traceback = traceback.format_exc()
if record is not None:
call_args = (self.operation, self.path, self.line, self.column, self.traceback)
with open(record, 'w') as f:
json.dump(call_args, f)
self.show_errors()
if debugger:
einfo = sys.exc_info()
pdb = __import__(debugger)
if debugger == 'pudb':
pdb.post_mortem(einfo[2], einfo[0], einfo[1])
else:
pdb.post_mortem(einfo[2])
exit(1)
def show_location(self, lineno, column, show=3):
# Three lines ought to be enough
lower = lineno - show if lineno - show > 0 else 0
prefix = ' |'
for i, line in enumerate(self.script._code.split('\n')[lower:lineno]):
print(prefix, lower + i + 1, line)
print(prefix, ' ' * (column + len(str(lineno))), '^')
def show_operation(self):
print("%s:\n" % self.operation.capitalize())
if self.operation == 'complete':
self.show_completions()
else:
self.show_definitions()
def show_completions(self):
for completion in self.objects:
print(completion.name)
def show_definitions(self):
for completion in self.objects:
print(completion.full_name)
if completion.module_path is None:
continue
if os.path.abspath(completion.module_path) == os.path.abspath(self.path):
self.show_location(completion.line, completion.column)
def show_errors(self):
sys.stderr.write(self.traceback)
print(("Error with running Script(...).{operation}() with\n"
"\tpath: {path}\n"
"\tline: {line}\n"
"\tcolumn: {column}").format(**self.__dict__))
def main(arguments):
debugger = 'pdb' if arguments['--pdb'] else \
'ipdb' if arguments['--ipdb'] else \
'pudb' if arguments['--pudb'] else None
record = arguments['--record']
jedi.settings.use_filesystem_cache = arguments['--fs-cache']
if arguments['--debug']:
jedi.set_debug_function()
if arguments['redo'] or arguments['show']:
t = TestCase.from_cache(record)
if arguments['show']:
t.show_errors()
else:
t.run(debugger)
elif arguments['run']:
TestCase(
arguments['<operation>'], arguments['<path>'],
int(arguments['<line>']), int(arguments['<column>'])
).run(debugger, print_result=True)
else:
for _ in range(int(arguments['--maxtries'])):
t = TestCase.generate(arguments['<path>'] or '.')
if arguments['-s']:
print('%s %s %s %s ' % (t.operation, t.path, t.line, t.column))
sys.stdout.flush()
else:
print('.', end='')
t.run(debugger, record)
sys.stdout.flush()
print()
if __name__ == '__main__':
arguments = docopt(__doc__)
main(arguments)
|
the-stack_0_21883 | """
clims.models.usertaskassignee
sentry.models.groupassignee
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from django.conf import settings
from django.db import models
from django.utils import timezone
from sentry.db.models import FlexibleForeignKey, Model, sane_repr, \
BaseManager
from sentry.models.activity import Activity
from sentry.signals import issue_assigned
from sentry.utils import metrics
class WorkBatchAssigneeManager(BaseManager):
def assign(self, work_batch, assigned_to, acting_user=None):
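        # Subscribes the assignee to the work batch, upserts the single assignee
        # row (clearing whichever of user/team is not being assigned), and records
        # an ASSIGNED activity plus a metrics increment when the assignment changed.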
from sentry.models import User, Team
from clims.models import WorkBatchSubscription, WorkBatchSubscriptionReason
WorkBatchSubscription.objects.subscribe_actor(
work_batch=work_batch,
actor=assigned_to,
reason=WorkBatchSubscriptionReason.assigned,
)
if isinstance(assigned_to, User):
assignee_type = 'user'
other_type = 'team'
elif isinstance(assigned_to, Team):
assignee_type = 'team'
other_type = 'user'
else:
raise AssertionError('Invalid type to assign to: %r' % type(assigned_to))
now = timezone.now()
assignee, created = WorkBatchAssignee.objects.get_or_create(
work_batch=work_batch,
defaults={
assignee_type: assigned_to,
'date_added': now,
}
)
if not created:
affected = WorkBatchAssignee.objects.filter(
work_batch=work_batch,
).exclude(**{
assignee_type: assigned_to,
}).update(**{
assignee_type: assigned_to,
other_type: None,
'date_added': now,
})
else:
affected = True
issue_assigned.send_robust(
work_batch=work_batch,
user=acting_user,
sender=self.__class__)
if affected:
activity = Activity.objects.create(
work_batch=work_batch,
type=Activity.ASSIGNED,
user=acting_user,
data={
'assignee': six.text_type(assigned_to.id),
'assigneeEmail': getattr(assigned_to, 'email', None),
'assigneeType': assignee_type,
},
)
activity.send_notification()
# TODO: Look into this
metrics.incr('work_batch.assignee.change', instance='assigned', skip_internal=True)
def deassign(self, work_batch, acting_user=None):
affected = WorkBatchAssignee.objects.filter(
work_batch=work_batch,
)[:1].count()
WorkBatchAssignee.objects.filter(
work_batch=work_batch,
).delete()
if affected > 0:
activity = Activity.objects.create(
work_batch=work_batch,
type=Activity.UNASSIGNED,
user=acting_user,
)
activity.send_notification()
metrics.incr('work_batch.assignee.change', instance='deassigned', skip_internal=True)
class WorkBatchAssignee(Model):
"""
Identifies an assignment relationship between a user/team and an
aggregated event (Group).
"""
__core__ = False
objects = WorkBatchAssigneeManager()
organization = FlexibleForeignKey('sentry.Organization', related_name="assignee_set")
work_batch = models.OneToOneField('clims.WorkBatch', related_name="assignee_set")
user = FlexibleForeignKey(
settings.AUTH_USER_MODEL,
related_name="sentry_assignee_set",
null=True)
team = FlexibleForeignKey(
'sentry.Team',
related_name="sentry_assignee_set",
null=True)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = 'clims'
db_table = 'clims_workbatchassignee'
__repr__ = sane_repr('work_batch_id', 'user_id', 'team_id')
def save(self, *args, **kwargs):
assert (
not (self.user_id is not None and self.team_id is not None)
and not (self.user_id is None and self.team_id is None)
), 'Must have Team or User, not both'
super(WorkBatchAssignee, self).save(*args, **kwargs)
def assigned_actor_id(self):
if self.user:
return u"user:{}".format(self.user_id)
if self.team:
return u"team:{}".format(self.team_id)
raise NotImplementedError("Unkown Assignee")
def assigned_actor(self):
from sentry.api.fields.actor import Actor
return Actor.from_actor_id(self.assigned_actor_id())
|
the-stack_0_21886 | def test():
assert len(instances) == 2, "You didn't get two instances"
expected_fields = {"text", "title", "stars", "aspect", "sentiment"}
assert (
instances[0].fields.keys() == expected_fields
), "You don't have the right fields in your Instance"
assert (
instances[0]["sentiment"] == "negative"
), "You didn't read the fields correctly"
assert instances[0]["aspect"] == "tutorials", "You didn't read the fields correctly"
assert (
instances[1]["sentiment"] == "positive"
), "You didn't read the fields correctly"
assert instances[1]["aspect"] == "library", "You didn't read the fields correctly"
__msg__.good("Well done!")
|
the-stack_0_21887 | from __future__ import print_function, absolute_import, division
import os
# Importing the Kratos Library
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.kratos_utilities as KratosUtils
dependencies_are_available = KratosUtils.CheckIfApplicationsAvailable("FluidDynamicsApplication")
if dependencies_are_available:
from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis
def GetFilePath(fileName):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), fileName)
class TestSerializer(KratosUnittest.TestCase):
def _prepare_test(self):
# Define a model and load the parameters
self.pre_serialized_model = KratosMultiphysics.Model()
with open(GetFilePath("auxiliar_files_for_python_unittest/parameters_files/test_serializer.json"),'r') as parameter_file:
parameters = KratosMultiphysics.Parameters(parameter_file.read())
file_name = parameters["solver_settings"]["model_import_settings"]["input_filename"].GetString()
parameters["solver_settings"]["model_import_settings"]["input_filename"].SetString(GetFilePath(file_name))
# First the model is initialized
self.pre_serialized_simulation = FluidDynamicsAnalysis(self.pre_serialized_model, parameters)
self.pre_serialized_simulation.Initialize()
# Before serializing the model, main model part is set to RESTARTED
self.main_model_part_name = parameters["solver_settings"]["model_part_name"].GetString()
self.pre_serialized_model.GetModelPart(self.main_model_part_name).ProcessInfo.SetValue(KratosMultiphysics.IS_RESTARTED,True)
serialized_model = KratosMultiphysics.StreamSerializer()
serialized_model.Save("ModelSerialization",self.pre_serialized_model)
with open(GetFilePath("auxiliar_files_for_python_unittest/parameters_files/test_serializer.json"),'r') as parameter_file:
self.project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
# Parameters are read again and input type set to use_input_model_part since the serialized model already has the mdpa loaded
self.project_parameters["solver_settings"]["model_import_settings"]["input_type"].SetString("use_input_model_part")
# Deserialize and store the new model
self.current_model = KratosMultiphysics.Model()
serialized_model.Load("ModelSerialization",self.current_model)
def _check_results(self):
pre_serialized_model_part = self.pre_serialized_model.GetModelPart(self.main_model_part_name)
pre_serialized_pressure_results = [node.GetSolutionStepValue(KratosMultiphysics.PRESSURE) for node in pre_serialized_model_part.Nodes]
pre_serialized_velocity_results = [node.GetSolutionStepValue(KratosMultiphysics.VELOCITY) for node in pre_serialized_model_part.Nodes]
serialized_model_part = self.current_model.GetModelPart(self.main_model_part_name)
serialized_pressure_results = [node.GetSolutionStepValue(KratosMultiphysics.PRESSURE) for node in serialized_model_part.Nodes]
serialized_velocity_results = [node.GetSolutionStepValue(KratosMultiphysics.VELOCITY) for node in serialized_model_part.Nodes]
# Comparing results before and after serializing
for pre_serialized_result, serialized_result in zip(pre_serialized_pressure_results,serialized_pressure_results):
self.assertAlmostEqual(pre_serialized_result, serialized_result)
for pre_serialized_result, serialized_result in zip(pre_serialized_velocity_results,serialized_velocity_results):
            for value_pre_serialized, value_serialized in zip(pre_serialized_result, serialized_result):
                self.assertAlmostEqual(value_pre_serialized, value_serialized)
@KratosUnittest.skipUnless(dependencies_are_available,"FluidDynamicsApplication is not available")
def test_serializer_fluid_analysis(self):
self._prepare_test()
# Solving simulation before serializing to later check the results
self.pre_serialized_simulation.RunSolutionLoop()
self.pre_serialized_simulation.Finalize()
# Solving simulation after serializing
self.serialized_simulation = FluidDynamicsAnalysis(self.current_model, self.project_parameters)
self.serialized_simulation.Run()
self._check_results()
if __name__ == '__main__':
KratosUnittest.main()
|
the-stack_0_21890 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
from typing import (
Callable,
List,
Mapping,
Set,
Dict,
Any,
Collection,
Tuple,
Union,
)
import torch
from torch import optim
from torch.distributed._sharded_tensor import ShardedTensor
OptimizerFactory = Callable[[List[torch.Tensor]], optim.Optimizer]
class KeyedOptimizer(optim.Optimizer):
"""
Takes a dict of parameters and exposes state_dict by parameter key.
"""
def __init__(
self,
params: Mapping[str, torch.Tensor],
# pyre-ignore [2]
state: Mapping[Any, Any],
param_groups: Collection[Mapping[str, Any]],
) -> None:
torch._C._log_api_usage_once(f"torchrec.optim.{self.__class__.__name__}")
# pyre-ignore [4]
self.state: Mapping[Any, Any] = state
self.param_groups: Collection[Mapping[str, Any]] = param_groups
self.params = params
self.defaults: Dict[str, Any] = {}
params_set = set(params.values())
non_param_state_keys = [key for key in self.state if key not in params_set]
if len(non_param_state_keys) > 0:
raise ValueError(
"All state keys must be params. The following keys are not: {}.".format(
non_param_state_keys
)
)
def state_dict(self) -> Dict[str, Any]:
"""
Returned state and param_groups will contain parameter keys
instead of parameter indices in torch.Optimizer.
This allows for advanced functionality like optimizer re-sharding to be implemented.
"""
state = self.state
param_groups = self.param_groups
params = self.params
param_to_key = {param: key for key, param in params.items()}
ret_state = {
param_to_key[param]: state_val for param, state_val in state.items()
}
ret_groups = []
for group in param_groups:
param_keys = []
for param in group["params"]:
param_keys.append(param_to_key[param])
ret_group = {"params": sorted(param_keys)}
for k, v in group.items():
if k != "params":
ret_group[k] = deepcopy(v)
ret_groups.append(ret_group)
return {"state": ret_state, "param_groups": ret_groups}
def post_load_state_dict(self) -> None:
pass
def load_state_dict(self, state_dict: Mapping[str, Any]) -> None:
"""
This implementation is much stricter than the one in torch.Optimizer:
it requires implementations to fully initialize their state during first optimization iteration,
and it prohibits loading an empty state into already initialized KeyedOptimizer and vise versa.
Because of introduced strictness it allows us to:
* do compatibility checks for state and param_groups, which improves usability
* avoid state duplication by directly copying into state tensors, e.g.
optimizer.step() # make sure optimizer is initialized
sd = optimizer.state_dict()
load_checkpoint(sd) # copy state directly into tensors, re-shard if needed
optimizer.load_state_dict(sd) # replace param_groups
"""
new_state = state_dict["state"]
new_param_groups = state_dict["param_groups"]
state = self.state
param_groups = self.param_groups
params = self.params
# Load state
if len(state) != len(new_state):
raise ValueError(
f"Different parameter count: {len(state)} vs {len(new_state)}"
)
for param_key, param in params.items():
if param not in state:
continue
if param_key not in new_state:
raise ValueError(f"Parameter {param_key} not found")
if len(state[param]) != len(new_state[param_key]):
raise ValueError(
f"Different state size: {len(state[param])} vs {len(new_state[param_key])}"
)
for state_key, state_val in state[param].items():
if state_key not in new_state[param_key]:
raise ValueError(
f"State key {state_key} not found for param {param_key}"
)
new_state_val = new_state[param_key][state_key]
if isinstance(state_val, torch.Tensor):
state_val.detach().copy_(new_state_val)
elif isinstance(state_val, ShardedTensor):
num_shards = len(state_val.local_shards())
num_new_shards = len(new_state_val.local_shards())
if num_shards != num_new_shards:
raise ValueError(
f"Different number of shards {num_shards} vs {num_new_shards} for {param_key}/{state_key}"
)
for shard, new_shard in zip(
state_val.local_shards(), new_state_val.local_shards()
):
shard.tensor.detach().copy_(new_shard.tensor)
else:
state[param][state_key] = deepcopy(new_state_val)
# Load param_groups.
if len(param_groups) != len(new_param_groups):
raise ValueError(
f"Different param_groups count: {len(param_groups)} vs {len(new_param_groups)}"
)
param_to_key = {param: key for key, param in params.items()}
group_map = {}
for group in param_groups:
param_keys = []
for param in group["params"]:
param_keys.append(param_to_key[param])
group_map["/".join(sorted(param_keys))] = group
new_group_map = {}
for new_group in new_param_groups:
param_keys = []
for param_key in new_group["params"]:
param_keys.append(param_key)
new_group_map["/".join(sorted(param_keys))] = new_group
for group_key, group in group_map.items():
if group_key not in new_group_map:
raise ValueError(f"Group {group_key} not found")
new_group = new_group_map[group_key]
if len(group) != len(new_group):
raise ValueError(
f"Different param_group size: {len(group)} vs {len(new_group)}"
)
for k, v in group.items():
if k not in new_group:
raise ValueError(f"Group key {k} not found for group {group_key}")
if k != "params":
group[k] = deepcopy(new_group[k])
self.post_load_state_dict()
# pyre-ignore [2]
def add_param_group(self, param_group: Any) -> None:
raise NotImplementedError()
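# Hypothetical usage sketch of the strict checkpointing contract described in
# load_state_dict() above; `make_optimizer` and `load_checkpoint` are
# placeholder names, not part of this API:
#
#   opt = make_optimizer(model)   # some KeyedOptimizer implementation
#   opt.step()                    # first step fully initializes optimizer state
#   sd = opt.state_dict()         # state is keyed by parameter name, not index
#   sd = load_checkpoint(sd)      # values can be copied straight into the tensors
#   opt.load_state_dict(sd)       # validates keys and updates param_groups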
class CombinedOptimizer(KeyedOptimizer):
"""
Combines multiple optimizers into one.
"""
def __init__(
self, optims: List[Union[KeyedOptimizer, Tuple[str, KeyedOptimizer]]]
) -> None:
self.defaults: Dict[str, Any] = {}
# Append empty optimizer key if not passed.
self._optims: List[Tuple[str, KeyedOptimizer]] = []
for key_value in optims:
if isinstance(key_value, KeyedOptimizer):
key_value = ("", key_value)
self._optims.append(key_value)
all_keys: Set[str] = set()
for opt_key, opt in self._optims:
for param_key in opt.params.keys():
new_param = CombinedOptimizer._prepend_opt_key(param_key, opt_key)
if new_param in all_keys:
raise ValueError(f"Duplicate param key {new_param}")
all_keys.add(new_param)
def __repr__(self) -> str:
ret = []
for _, opt in self._optims:
ret.append(opt.__repr__())
return ",".join(ret)
def zero_grad(self, set_to_none: bool = False) -> None:
for _, opt in self._optims:
opt.zero_grad(set_to_none=set_to_none)
# pyre-ignore [2]
def step(self, closure: Any = None) -> None:
for _, opt in self._optims:
opt.step(closure=closure)
@property
def optimizers(self) -> List[Tuple[str, KeyedOptimizer]]:
return self._optims
@staticmethod
def _prepend_opt_key(name: str, opt_key: str) -> str:
return opt_key + ("." if opt_key else "") + name
@property
def param_groups(self) -> Collection[Mapping[str, Any]]:
return [
param_group for _, opt in self._optims for param_group in opt.param_groups
]
@property
def params(self) -> Mapping[str, torch.Tensor]:
ret = {}
for opt_key, opt in self._optims:
for param_key, param in opt.params.items():
ret[CombinedOptimizer._prepend_opt_key(param_key, opt_key)] = param
return ret
@property
# pyre-ignore [3]
def state(self) -> Mapping[torch.Tensor, Any]:
ret = {}
for _, opt in self._optims:
for param, state in opt.state.items():
ret[param] = state
return ret
def post_load_state_dict(self) -> None:
for _, opt in self._optims:
opt.post_load_state_dict()
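# Illustrative example (assumed names): given
#   CombinedOptimizer([("sparse", sparse_opt), dense_opt])
# a parameter exposed by sparse_opt as "embedding.weight" appears in params()
# and state_dict() as "sparse.embedding.weight", while parameters of dense_opt
# (registered under the empty key "") keep their original names.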
class KeyedOptimizerWrapper(KeyedOptimizer):
"""
Takes a dict of parameters and exposes state_dict by parameter key.
"""
def __init__(
self,
params: Mapping[str, torch.Tensor],
optim_factory: OptimizerFactory,
) -> None:
self._optimizer: optim.Optimizer = optim_factory(list(params.values()))
super().__init__(params, self._optimizer.state, self._optimizer.param_groups)
def zero_grad(self, set_to_none: bool = False) -> None:
self._optimizer.zero_grad()
# pyre-ignore [2]
def step(self, closure: Any = None) -> None:
self._optimizer.step(closure=closure)
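# Hypothetical usage sketch (parameter names are illustrative): the factory is
# called with the list of parameter tensors and must return a torch optimizer.
#
#   params = {"linear.weight": weight, "linear.bias": bias}
#   opt = KeyedOptimizerWrapper(params, lambda ps: torch.optim.SGD(ps, lr=0.1))
#   opt.step()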
class OptimizerWrapper(KeyedOptimizer):
def __init__(self, optimizer: KeyedOptimizer) -> None:
self._optimizer = optimizer
self.params: Mapping[str, torch.Tensor] = optimizer.params
# pyre-ignore [4]
self.state: Mapping[Any, Any] = optimizer.state
self.param_groups: Collection[Mapping[str, Any]] = optimizer.param_groups
def __repr__(self) -> str:
return self._optimizer.__repr__()
def zero_grad(self, set_to_none: bool = False) -> None:
self._optimizer.zero_grad(set_to_none=set_to_none)
# pyre-ignore [2]
def step(self, closure: Any = None) -> None:
self._optimizer.step(closure=closure)
# pyre-ignore [2]
def add_param_group(self, param_group: Any) -> None:
raise NotImplementedError()
def state_dict(self) -> Dict[str, Any]:
return self._optimizer.state_dict()
def post_load_state_dict(self) -> None:
self._optimizer.post_load_state_dict()
def load_state_dict(self, state_dict: Mapping[str, Any]) -> None:
self._optimizer.load_state_dict(state_dict)
# Reassign references because self._optimizer receives new state and param_group
# references after load_state_dict.
self.state = self._optimizer.state
self.param_groups = self._optimizer.param_groups
self.post_load_state_dict()
|
the-stack_0_21891 | import os
weights = [
'weights/1650524901-epoch30.pth.tar',
'weights/1650524906-epoch29.pth.tar',
'weights/1650268119-epoch29.pth.tar',
]
results = 'results/mot'
#postfix = ['_encoder_rm_last', '_encoder', '_encoder_dec_last_stride', '_encoder_agg', '_encoder_upsampler', '_encoder_upsampler_agg']
postfix = ['_encoder_rm_last', '_encoder', '_encoder_dec_last_stride', '_encoder_agg', '_encoder_upsampler', '_encoder_upsampler_agg']
for w in weights:
print(w)
print('exp', 'HOTA', 'MOTA', 'IDF1')
for p in postfix:
exp = 'dpc_' + os.path.split(w)[-1].replace('.pth.tar', '') + p
pth = os.path.join(results, exp, 'quantitative', 'pedestrian_summary.txt')
if os.path.exists(pth):
with open(pth) as f:
lines = f.readlines()
names = lines[0].split(' ')
values = lines[1].split(' ')
print(p[1:], values[names.index('HOTA')], values[names.index('MOTA')], values[names.index('IDF1')])
print('\n') |
the-stack_0_21892 | #!/usr/bin/env python
# (c) Copyright 2015 Hewlett Packard Enterprise Development LP
# (c) Copyright 2016 Barefoot Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
from opsvsi.docker import *
from opsvsi.opsvsitest import *
from opsvsiutils.systemutil import *
class CustomTopo(Topo):
'''
Topology
[2] <---> [2]
H1[h1-eth0] <---> [1] S1 [3] <---> [3] S2 <---> [h2-eth0] H2
[4] <---> [4]
'''
def build(self, hsts=2, sws=2, **_opts):
self.hsts = hsts
self.sws = sws
# Add list of hosts
for h in irange(1, hsts):
host = self.addHost( 'h%s' % h)
# Add list of switches
for s in irange(1, sws):
switch = self.addSwitch( 's%s' %s)
# Add links between nodes based on custom topo
self.addLink('h1', 's1')
self.addLink('h2', 's2')
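        # The three parallel s1<->s2 links below should, given the link order,
        # map to interfaces 2, 3 and 4 on each switch; they provide the
        # alternative next hops (192.168.30/40/50.x) exercised by the ECMP
        # routes configured in the test.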
self.addLink('s1', 's2')
self.addLink('s1', 's2')
self.addLink('s1', 's2')
class EcmpStaticRouteTest(OpsVsiTest):
def setupNet(self):
host_opts = self.getHostOpts()
switch_opts = self.getSwitchOpts()
topo = CustomTopo(hsts=2, sws=2, hopts=host_opts, sopts=switch_opts)
self.net = Mininet(topo, switch=VsiOpenSwitch,
host=Host, link=OpsVsiLink,
controller=None, build=True)
def config_check(self):
s1 = self.net.switches[ 0 ]
s2 = self.net.switches[ 1 ]
h1 = self.net.hosts[ 0 ]
h2 = self.net.hosts[ 1 ]
info("###### configuration start ######")
info("\n###### 30 second delay ######")
time.sleep(30)
# host 1 configuration
info("\n###### configuring host 1 ######")
h1.cmd("ip addr add 192.168.10.1/24 dev h1-eth0")
h1.cmd("ip route add 192.168.0.0/16 via 192.168.10.2")
# host 2 configuration
info("\n###### configuring host 2 ######")
h2.cmd("ip addr add 192.168.20.1/24 dev h2-eth0")
h2.cmd("ip route add 192.168.0.0/16 via 192.168.20.2")
## switch 1 configuration
info("\n###### configuring switch 1 ######")
s1.cmdCLI("configure terminal")
# interface 1 configuration
s1.cmdCLI("interface 1")
s1.cmdCLI("ip address 192.168.10.2/24")
s1.cmdCLI("no shutdown")
s1.cmdCLI("exit")
# interface 2 configuration
s1.cmdCLI("interface 2")
s1.cmdCLI("ip address 192.168.30.1/24")
s1.cmdCLI("no shutdown")
s1.cmdCLI("exit")
# interface 3 configuration
s1.cmdCLI("interface 3")
s1.cmdCLI("ip address 192.168.40.1/24")
s1.cmdCLI("no shutdown")
s1.cmdCLI("exit")
# interface 4 configuration
s1.cmdCLI("interface 4")
s1.cmdCLI("ip address 192.168.50.1/24")
s1.cmdCLI("no shutdown")
s1.cmdCLI("exit")
s1.cmdCLI("exit")
## switch 2 configuration
info("\n###### configuring switch 2 ######")
s2.cmdCLI("configure terminal")
# interface 1 configuration
s2.cmdCLI("interface 1")
s2.cmdCLI("ip address 192.168.20.2/24")
s2.cmdCLI("no shutdown")
s2.cmdCLI("exit")
# interface 2 configuration
s2.cmdCLI("interface 2")
s2.cmdCLI("ip address 192.168.30.2/24")
s2.cmdCLI("no shutdown")
s2.cmdCLI("exit")
# interface 3 configuration
s2.cmdCLI("interface 3")
s2.cmdCLI("ip address 192.168.40.2/24")
s2.cmdCLI("no shutdown")
s2.cmdCLI("exit")
# interface 4 configuration
s2.cmdCLI("interface 4")
s2.cmdCLI("ip address 192.168.50.2/24")
s2.cmdCLI("no shutdown")
s2.cmdCLI("exit")
s2.cmdCLI("exit")
info("\n###### configuration end ######")
def test_ecmp_static_route(self):
s1 = self.net.switches[ 0 ]
s2 = self.net.switches[ 1 ]
h1 = self.net.hosts[ 0 ]
h2 = self.net.hosts[ 1 ]
# ping h1 to h2
info("\n\n### no route between h1 and h2. ping should fail")
info('\n### Ping host1 from host2 ###\n')
ret = h1.cmd("ping -c 1 192.168.20.1")
status = parsePing(ret)
# Return code means whether the test is successful
if status:
info('Fail: Ping Passed!\n\n')
else:
info('Success: Ping Failed!\n\n')
# add a route on s1
s1.cmdCLI("configure terminal")
s1.cmdCLI("ip route 192.168.20.0/24 192.168.30.2")
s1.cmdCLI("exit")
# add a route on s2
s2.cmdCLI("configure terminal")
s2.cmdCLI("ip route 192.168.10.0/24 192.168.30.1")
s2.cmdCLI("exit")
time.sleep(2)
# ping h1 to h2
info("\n### added 1 route between h1 and h2. ping should succeed")
info('\n### Ping host1 from host2 ###\n')
ret = h1.cmd("ping -c 1 192.168.20.1")
status = parsePing(ret)
# Return code means whether the test is successful
if status:
info('Ping Passed!\n\n')
else:
info('Ping Failed!\n\n')
# add one more route on s1 to make it ecmp
s1.cmdCLI("configure terminal")
s1.cmdCLI("ip route 192.168.20.0/24 192.168.40.2")
s1.cmdCLI("exit")
# add one more route on s2 to make it ecmp
s2.cmdCLI("configure terminal")
s2.cmdCLI("ip route 192.168.10.0/24 192.168.40.1")
s2.cmdCLI("exit")
time.sleep(2)
# ping h1 to h2
info("\n### added 1 more route between h1 and h2. ping should succeed")
info('\n### Ping host1 from host2 ###\n')
ret = h1.cmd("ping -c 1 192.168.20.1")
status = parsePing(ret)
# Return code means whether the test is successful
if status:
info('Ping Passed!\n\n')
else:
info('Ping Failed!\n\n')
# add one more route on s1
s1.cmdCLI("configure terminal")
s1.cmdCLI("ip route 192.168.20.0/24 192.168.50.2")
s1.cmdCLI("exit")
# add one more route on s2
s2.cmdCLI("configure terminal")
s2.cmdCLI("ip route 192.168.10.0/24 192.168.50.1")
s2.cmdCLI("exit")
time.sleep(2)
# ping h1 to h2
info("\n### added 1 more route between h1 and h2. ping should succeed")
info('\n### Ping host1 from host2 ###\n')
ret = h1.cmd("ping -c 1 192.168.20.1")
status = parsePing(ret)
# Return code means whether the test is successful
if status:
info('Ping Passed!\n\n')
else:
info('Ping Failed!\n\n')
# delete one route on s1. still ecmp
s1.cmdCLI("configure terminal")
s1.cmdCLI("no ip route 192.168.20.0/24 192.168.30.2")
s1.cmdCLI("exit")
# delete one route on s2. still ecmp
s2.cmdCLI("configure terminal")
s2.cmdCLI("no ip route 192.168.10.0/24 192.168.30.1")
s2.cmdCLI("exit")
time.sleep(2)
# ping h1 to h2
info("\n### deleted 1 route between h1 and h2. ping should succeed")
info('\n### Ping host1 from host2 ###\n')
ret = h1.cmd("ping -c 1 192.168.20.1")
status = parsePing(ret)
# Return code means whether the test is successful
if status:
info('Ping Passed!\n\n')
else:
info('Ping Failed!\n\n')
# delete one more route on s1 to make it non-ecmp
s1.cmdCLI("configure terminal")
s1.cmdCLI("no ip route 192.168.20.0/24 192.168.40.2")
s1.cmdCLI("exit")
# delete one more route on s2 to make it non-ecmp
s2.cmdCLI("configure terminal")
s2.cmdCLI("no ip route 192.168.10.0/24 192.168.40.1")
s2.cmdCLI("exit")
time.sleep(2)
# ping h1 to h2
info("\n### deleted 1 more route between h1 and h2. ping should succeed")
info('\n### Ping host1 from host2 ###\n')
ret = h1.cmd("ping -c 1 192.168.20.1")
status = parsePing(ret)
# Return code means whether the test is successful
if status:
info('Ping Passed!\n\n')
else:
info('Ping Failed!\n\n')
# delete the final route on s1
s1.cmdCLI("configure terminal")
s1.cmdCLI("no ip route 192.168.20.0/24 192.168.50.2")
s1.cmdCLI("exit")
# delete the final route on s2
s2.cmdCLI("configure terminal")
s2.cmdCLI("no ip route 192.168.10.0/24 192.168.50.1")
s2.cmdCLI("exit")
time.sleep(2)
# ping h1 to h2
info("\n### no route between h1 and h2. ping should fail")
info('\n### Ping host1 from host2 ###\n')
ret = h1.cmd("ping -c 1 192.168.20.1")
status = parsePing(ret)
# Return code means whether the test is successful
if status:
info('Ping Passed!\n\n')
else:
info('Success: Ping Failed!\n\n')
class Test_switchd_container_ecmp_static_route:
def setup_class(cls):
Test_switchd_container_ecmp_static_route.test = EcmpStaticRouteTest()
def test_switchd_container_ecmp_static_route_config(self):
self.test.config_check()
def test_switchd_container_ecmp_static_route(self):
self.test.test_ecmp_static_route()
def teardown_class(cls):
Test_switchd_container_ecmp_static_route.test.net.stop()
def __del__(self):
del self.test
|
the-stack_0_21894 | #!/usr/bin/env python
"""
gn can only run python scripts. This launches a subprocess Node process.
The working dir of this program is out/Debug/ (AKA root_build_dir)
Before running node, we symlink js/node_modules to out/Debug/node_modules.
"""
import subprocess
import sys
import os
def symlink(target, name, target_is_dir=False):
if os.name == "nt":
from ctypes import windll, WinError
CreateSymbolicLinkW = windll.kernel32.CreateSymbolicLinkW
flags = 0x02 # SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE
if (target_is_dir):
flags |= 0x01 # SYMBOLIC_LINK_FLAG_DIRECTORY
if not CreateSymbolicLinkW(name.encode('utf-16le'),
target.encode('utf-16le'),
flags):
raise WinError()
else:
os.symlink(target, name)
js_path = os.path.dirname(os.path.realpath(__file__))
node_modules_path = os.path.join(js_path, "node_modules")
if not os.path.exists("node_modules"):
symlink(node_modules_path, "node_modules", True)
args = ["node"] + sys.argv[1:]
sys.exit(subprocess.call(args))
|
the-stack_0_21895 | from sys import argv # From the sys module import the argv object used to get arguments from command line
script, filename = argv # Here we are breaking the arguments, the script name ex15.py to script variable and the file name ex15_sample.txt to filename variable.
txt = open(filename, 'r') # Here, we are opening the filename and assigning it to the variable txt
print(f"Here's your file {filename}: ")
print(txt.read())
txt.close()
print("Now, we are appending the file")
txt = open(filename, 'r+')
txt.seek(0, 0)
txt.write("Understood!")
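# Note: seek(0, 0) moved the file pointer back to the start, so the write above
# overwrites the first characters of the file in place rather than appending to
# the end (true appending would use mode 'a').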
txt.close()
txt = open(filename, 'r')
print(f"Here's the updated {filename}: \n")
print(txt.read())
txt.close()
# print("Type the filename again: ")
# file_again = input("> ") # Asking for the file name again
#
# txt_again = open(file_again) # Opening the file and assigning it to variable txt_again
#
# print (txt_again.read()) # Reads the content of file and prints it
#
# txt_again.close()
|
the-stack_0_21898 | '''
A very simple drawing 'app' that demonstrates
custom views and saving images to the camera roll.
'''
import ui
import photos
import console
# The PathView class is responsible for tracking
# touches and drawing the current stroke.
# It is used by SketchView.
class PathView (ui.View):
def __init__(self, frame):
self.frame = frame
self.flex = 'WH'
self.path = None
self.action = None
def touch_began(self, touch):
x, y = touch.location
self.path = ui.Path()
self.path.line_width = 8.0
self.path.line_join_style = ui.LINE_JOIN_ROUND
self.path.line_cap_style = ui.LINE_CAP_ROUND
self.path.move_to(x, y)
def touch_moved(self, touch):
x, y = touch.location
self.path.line_to(x, y)
self.set_needs_display()
def touch_ended(self, touch):
# Send the current path to the SketchView:
if callable(self.action):
self.action(self)
# Clear the view (the path has now been rendered
# into the SketchView's image view):
self.path = None
self.set_needs_display()
def draw(self):
if self.path:
self.path.stroke()
# The main SketchView contains a PathView for the current
# line and an ImageView for rendering completed strokes.
# It also manages the 'Clear' and 'Save' ButtonItems that
# are shown in the title bar.
class SketchView (ui.View):
def __init__(self, width=1024, height=1024):
self.bg_color = 'white'
iv = ui.ImageView(frame=(0, 0, width, height))
pv = PathView(frame=self.bounds)
pv.action = self.path_action
self.add_subview(iv)
self.add_subview(pv)
save_button = ui.ButtonItem()
save_button.title = 'Save Image'
save_button.action = self.save_action
clear_button = ui.ButtonItem()
clear_button.title = 'Clear'
clear_button.tint_color = 'red'
clear_button.action = self.clear_action
self.right_button_items = [save_button, clear_button]
self.image_view = iv
def path_action(self, sender):
path = sender.path
old_img = self.image_view.image
width, height = self.image_view.width, self.image_view.height
with ui.ImageContext(width, height) as ctx:
if old_img:
old_img.draw()
path.stroke()
self.image_view.image = ctx.get_image()
def clear_action(self, sender):
self.image_view.image = None
def save_action(self, sender):
if self.image_view.image:
# We draw a new image here, so that it has the current
# orientation (the canvas is quadratic).
with ui.ImageContext(self.width, self.height) as ctx:
self.image_view.image.draw()
img = ctx.get_image()
photos.save_image(img)
console.hud_alert('Saved')
else:
console.hud_alert('No Image', 'error')
# We use a square canvas, so that the same image
# can be used in portrait and landscape orientation.
w, h = ui.get_screen_size()
canvas_size = max(w, h)
sv = SketchView(canvas_size, canvas_size)
sv.name = 'Sketch'
sv.present('fullscreen')
|
the-stack_0_21900 | # -*- coding:utf-8 -*-
# __author__ = '磊'
from django.shortcuts import render_to_response
from gauth.userManager import UserManager
from gauth.groupManager import GroupManager
from gauth.common import MyJSONEncoder,convert_to_int_list
from django.http import HttpResponse
from gauth.models import User
import json
def index(request):
return render_to_response('admin/user/index.html',{})
def edit(request, user_id):
if int(user_id) != 0:
usermanager = UserManager()
user = usermanager.get_bypk(user_id)
return render_to_response('admin/user/edit.html',{'user_id':user_id,'user':user})
return render_to_response('admin/user/edit.html',{'user_id':0})
def select_group(request, user_id):
"""
    Select user groups for a user
:param request:
:param user_id:
:return:
"""
groupmanager = GroupManager()
usermanager = UserManager()
groups = groupmanager.get_available()
user_groups = usermanager.get_groups(user_id)
groups_str = ''
for group in groups:
groups_str += '{id:'+str(group.id)+',text:\"' + group.name + '\"},'
user_groups_str = ''
for user_group in user_groups:
user_groups_str += str(user_group.pk) + ','
return render_to_response('admin/user/select_group.html',
{'user_id' : user_id, 'groups': groups_str, 'user_groups': user_groups_str[:-1]})
def save_group(request, user_id, groups):
"""
    Save the selected user groups
:param request:
:param user_id:
:param groups:
:return:
"""
if request.method == 'POST':
groupmanager = GroupManager()
usermanager = UserManager()
grouplist = convert_to_int_list(groups)
already_user_groups = usermanager.get_groups(int(user_id))
already_group_ids = []
        # Find the user groups whose relation with this user should be removed, and delete those relations
for already_group in already_user_groups:
already_group_ids.append(already_group.id)
if grouplist.count(already_group.id) < 1:
usermanager.remove_group(int(user_id), already_group)
        # Find the relations that need to be added and add them
for group_id in grouplist:
if already_group_ids.count(group_id) < 1:
usermanager.add_grop(int(user_id), group_id)
usermanager = UserManager()
user_groups = usermanager.get_groups(user_id)
user_groups_str = [g.name for g in user_groups]
res = {'success': True, 'groups': user_groups_str}
return HttpResponse(json.dumps(res, cls=MyJSONEncoder))
else:
return HttpResponse('fail')
def update(request):
from forms import UserForm
userf = UserForm(request.POST)
if userf.is_valid() and userf.cleaned_data['password'] == userf.cleaned_data['repassword']:
user = User()
user.username = userf.cleaned_data['username']
user.email = userf.cleaned_data['email']
user.is_active = userf.cleaned_data['is_active']
user.nickname = userf.cleaned_data['nickname']
user.password = userf.cleaned_data['password']
user.phone = userf.cleaned_data['phone']
user.id = userf.cleaned_data['id']
usermanager = UserManager()
if user.id is None or user.id == 0:
if usermanager.add(user):
newuser = usermanager.get_one(user.username)
res = {'success':True,'pk':newuser.id}
else:
res = {'success':False}
else:
if usermanager.update(user):
res = {'success':True,'pk':user.id}
else:
res = {'success':False}
else:
res = {'success':False,'message':userf.errors}
return HttpResponse(json.dumps(res, cls=MyJSONEncoder))
def update_user_disable(request, user_id):
return _update_state(int(user_id),False)
def update_user_enable(request, user_id):
return _update_state(int(user_id),True)
def _update_state(pk, is_active):
usermanager = UserManager()
if usermanager.update_is_active(pk, is_active):
res = {'success':True}
else:
res = {'success':False}
return HttpResponse(json.dumps(res, cls=MyJSONEncoder))
def users_data(request):
from forms import PageForm
pagef = PageForm(request.POST)
if pagef.is_valid():
start = pagef.cleaned_data['start']
length = pagef.cleaned_data['length']
draw = pagef.cleaned_data['draw']
usermanager = UserManager()
users = usermanager.get(start, length)
count = usermanager.get_count()
else:
users = None
draw = 0
count = 0
res= _load_data(users,draw,count)
return HttpResponse(json.dumps(res, cls=MyJSONEncoder))
def _load_data(users, draw, count):
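    # Builds the response structure used by jQuery DataTables server-side
    # processing: 'draw' echoes the request counter, 'recordsTotal' and
    # 'recordsFiltered' carry the row counts, and 'data' holds one rendered
    # row (a list of cell values/HTML snippets) per user.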
ulen = len(users)
res = {}
res['draw'] = draw
res['data'] = []
res['recordsFiltered'],res['recordsTotal'] = count,count
for i in range(ulen):
usermanager = UserManager()
user_groups = usermanager.get_groups(users[i].pk)
user_groups_str = [g.name for g in user_groups]
row = []
row.append('<input type="checkbox" name="id" value="'+str(users[i].pk)+'">')
row.append(i+1)
row.append(users[i].username)
row.append(user_groups_str)
row.append(users[i].nickname)
row.append(users[i].email)
row.append(users[i].phone)
row.append(users[i].create_time)
row.append(users[i].last_login_time)
if users[i].is_active:
row.append('<span class="label label-sm label-success">正常</span>')
else:
row.append('<span class="label label-sm label-default">禁用</span>')
action = '<a href="javascript:;" data-id="'+str(users[i].pk)+'" class="btn btn-xs default btn-editable"><i class="fa fa-edit"></i> 编辑</a>'
action += ' <a href="javascript:;" data-id="'+str(users[i].pk)+'" class="btn btn-xs default btn-editgroup"><i class="fa fa-edit"></i> 用户组</a>'
if users[i].is_active:
action += ' <a href="javascript:;" data-toggle="confirmation" data-id="'+str(users[i].pk)+'" class="btn btn-xs default btn-disable"><i class="fa fa-lock"></i>禁用</a>'
else:
action += ' <a href="javascript:;" data-toggle="confirmation" data-id="'+str(users[i].pk)+'" class="btn btn-xs default btn-enable"><i class="fa fa-unlock-alt"></i>启用</a>'
row.append(action)
res['data'].append(row)
return res |
the-stack_0_21901 | # -*- coding: utf-8 -*-
import os
# Use the scandir version of walk if possible,
# otherwise use the os module version
try:
# https://github.com/benhoyt/scandir
# Accepted into Python 3.5 stdlib
# Increases speed 2-20 times (depending on the platform and file system)
from scandir import walk
except ImportError:
# The old way with OS.
from os import walk
__VERSION__ = "0.7.1"
def null(path):
return path
def get_dirs(
directory=os.path.curdir,
depth=None,
absolute=True,
decorate=null):
"""Yield a list of directories
Args:
directory (Optional[str]): Starting directory.
Default: ```os.path.curdir```
depth (Optional[int]): Depth to recurse to.
Default: None (Infinite depth)
        absolute (Optional[bool]): Return absolute path
Default: True
decorate (Optional[function]): Decorate the return path
with the given function.
            Default: null (the identity function, i.e. just return the path)
Yields:
str: Next directory.
Raises:
FileNotFoundError: If ```directory``` does not exist
"""
curdepth = 0
# Make sure that the directory exists.
if not os.path.exists(directory):
raise FileNotFoundError
# If absolute is specified change directory into
    # an absolute directory path
if absolute:
directory = os.path.abspath(directory)
# Walk through the directory from the top.
for root, dirs, files in walk(directory, topdown=True):
# Increment current depth.
curdepth += 1
for dir_ in dirs:
# Ignore hidden directories.
if dir_ == "." or dir_ == "..":
continue
# Yield the current path
yield decorate(os.path.join(root, dir_))
# If depth none (infinite depth)
if depth is None:
# Just continue
continue
# If the current depth is greater than requested depth:
if curdepth >= depth:
# Break
break
def get_files(
directory=os.path.curdir,
extensions=None,
depth=None,
absolute=True,
decorate=null):
"""
Args:
directory (Optional[str]): Starting directory.
Default: ```os.path.curdir```
extensions (Optional[str]): List of extensions to yield
Case insensitive. ('.JPG' and '.jpg' are equivalent)
Default: None (All files).
depth (Optional[int]): Depth to recurse to.
Default: None (Infinite depth)
        absolute (Optional[bool]): Return absolute path
Default: True
decorate (Optional[function]): Decorate the return path
with the given function.
            Default: null (the identity function, i.e. just return the path)
Yields:
str: List of all files found in ``directory``
Raises:
FileNotFoundError: If ```directory``` does not exist
"""
curdepth = 0
# If the given of extensions is just a string.
# Turn it into a set so that 'in' can be used.
if isinstance(extensions, str):
extensions = {extensions}
# Convert all extensions to lowercase.
if extensions is not None:
extensions = [extension.lower() for extension in extensions]
# If absolute is specified change directory into
    # an absolute directory path
if absolute:
directory = os.path.abspath(directory)
# Make sure that the directory exists.
if not os.path.exists(directory):
raise FileNotFoundError
# Walk the directory, starting at the top.
for root, dirs, files in walk(directory, topdown=True):
# Increment the current depth.
curdepth += 1
# Loop through each of the files.
for name in files:
# Join the root and the name.
file = os.path.join(root, name)
# If extensions is not none.
if extensions is not None:
# Get the extension and convert it to lower case.
ext = os.path.splitext(file)[1].lower()
# If the file extension is in the extensions list
# yield the file
if ext in extensions:
yield decorate(file)
else:
# If extensions is None, just yield the file.
yield decorate(file)
# If depth none (infinite depth)
if depth is None:
# Just continue
continue
# If the current depth is greater than requested depth:
if curdepth >= depth:
# Break
break
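# Example usage (paths and extensions are hypothetical):
#
#   for path in get_files("photos", extensions={".jpg", ".png"}, depth=2):
#       print(path)
#
#   for d in get_dirs("projects", depth=1, absolute=False):
#       print(d)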
|
the-stack_0_21902 |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
An RPC server exposes a number of endpoints, each of which contain a set of
methods which may be invoked remotely by clients over a given transport.
To create an RPC server, you supply a transport, target and a list of
endpoints.
A transport can be obtained simply by calling the get_transport() method::
transport = messaging.get_transport(conf)
which will load the appropriate transport driver according to the user's
messaging configuration. See get_transport() for more details.
The target supplied when creating an RPC server expresses the topic, server
name and - optionally - the exchange to listen on. See Target for more details
on these attributes.
Each endpoint object may have a target attribute which may have namespace and
version fields set. By default, we use the 'null namespace' and version 1.0.
Incoming method calls will be dispatched to the first endpoint with the
requested method, a matching namespace and a compatible version number.
RPC servers have start(), stop() and wait() methods to begin handling
requests, stop handling requests and wait for all in-process requests to
complete.
A simple example of an RPC server with multiple endpoints might be::
from oslo_config import cfg
import oslo_messaging
import time
class ServerControlEndpoint(object):
target = oslo_messaging.Target(namespace='control',
version='2.0')
def __init__(self, server):
self.server = server
def stop(self, ctx):
if self.server:
self.server.stop()
class TestEndpoint(object):
def test(self, ctx, arg):
return arg
transport = oslo_messaging.get_transport(cfg.CONF)
target = oslo_messaging.Target(topic='test', server='server1')
endpoints = [
ServerControlEndpoint(None),
TestEndpoint(),
]
server = oslo_messaging.get_rpc_server(transport, target, endpoints,
executor='blocking')
try:
server.start()
while True:
time.sleep(1)
except KeyboardInterrupt:
print("Stopping server")
server.stop()
server.wait()
Clients can invoke methods on the server by sending the request to a topic and
it gets sent to one of the servers listening on the topic, or by sending the
request to a specific server listening on the topic, or by sending the request
to all servers listening on the topic (known as fanout). These modes are chosen
via the server and fanout attributes on Target but the mode used is transparent
to the server.
The first parameter to method invocations is always the request context
supplied by the client.
Parameters to the method invocation are primitive types and so must be the
return values from the methods. By supplying a serializer object, a server can
deserialize a request context and arguments from - and serialize return values
to - primitive types.
"""
from oslo_messaging.rpc.state import RPCStateEndpoint
__all__ = [
'get_rpc_server',
'expected_exceptions',
]
import logging
import sys
from oslo_messaging._i18n import _LE
from oslo_messaging.rpc import dispatcher as rpc_dispatcher
from oslo_messaging import server as msg_server
LOG = logging.getLogger(__name__)
class RPCServer(msg_server.MessageHandlingServer):
def __init__(self, transport, target, dispatcher, executor='blocking'):
super(RPCServer, self).__init__(transport, dispatcher, executor)
self._target = target
def _create_listener(self):
return self.transport._listen(self._target, 1, None)
def _process_incoming(self, incoming):
message = incoming[0]
try:
message.acknowledge()
except Exception:
LOG.exception(_LE("Can not acknowledge message. Skip processing"))
return
failure = None
try:
res = self.dispatcher.dispatch(message)
except rpc_dispatcher.ExpectedException as e:
failure = e.exc_info
LOG.debug(u'Expected exception during message handling (%s)', e)
except Exception:
            # current sys.exc_info() content can be overridden
# by another exception raised by a log handler during
# LOG.exception(). So keep a copy and delete it later.
failure = sys.exc_info()
LOG.exception(_LE('Exception during message handling'))
try:
if failure is None:
message.reply(res)
else:
message.reply(failure=failure)
except Exception:
LOG.exception(_LE("Can not send reply for message"))
finally:
# NOTE(dhellmann): Remove circular object reference
# between the current stack frame and the traceback in
# exc_info.
del failure
def get_rpc_server(transport, target, endpoints,
executor='blocking', serializer=None):
"""Construct an RPC server.
The executor parameter controls how incoming messages will be received and
dispatched. By default, the most simple executor is used - the blocking
executor.
    If the eventlet executor is used, the threading and time libraries need to be
monkeypatched.
:param transport: the messaging transport
:type transport: Transport
:param target: the exchange, topic and server to listen on
:type target: Target
:param endpoints: a list of endpoint objects
:type endpoints: list
:param executor: name of a message executor - for example
'eventlet', 'blocking'
:type executor: str
:param serializer: an optional entity serializer
:type serializer: Serializer
"""
dispatcher = rpc_dispatcher.RPCDispatcher(endpoints, serializer)
server = RPCServer(transport, target, dispatcher, executor)
state_endpoint = RPCStateEndpoint(server, target)
dispatcher.register_state_endpoint(state_endpoint)
return server
def expected_exceptions(*exceptions):
"""Decorator for RPC endpoint methods that raise expected exceptions.
Marking an endpoint method with this decorator allows the declaration
of expected exceptions that the RPC server should not consider fatal,
and not log as if they were generated in a real error scenario.
Note that this will cause listed exceptions to be wrapped in an
ExpectedException, which is used internally by the RPC sever. The RPC
client will see the original exception type.
"""
def outer(func):
def inner(*args, **kwargs):
try:
return func(*args, **kwargs)
# Take advantage of the fact that we can catch
# multiple exception types using a tuple of
# exception classes, with subclass detection
# for free. Any exception that is not in or
# derived from the args passed to us will be
# ignored and thrown as normal.
except exceptions:
raise rpc_dispatcher.ExpectedException()
return inner
return outer
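# Hypothetical usage sketch (endpoint and exception types are illustrative):
#
#   class EngineEndpoint(object):
#       @expected_exceptions(ValueError, KeyError)
#       def resize(self, ctx, flavor):
#           ...
#
# A ValueError raised inside resize() is wrapped in ExpectedException on the
# server (logged at debug level only) and reaches the RPC client as the
# original exception type.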
|
the-stack_0_21904 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pybind11 documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 11 19:23:48 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import subprocess
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe']
breathe_projects = {'pybind11': '.build/doxygenxml/'}
breathe_default_project = 'pybind11'
breathe_domain_by_extension = {'h': 'cpp'}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pybind11'
copyright = '2017, Wenzel Jakob'
author = 'Wenzel Jakob'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.3'
# The full version, including alpha/beta/rc tags.
release = '2.3.dev0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build', 'release.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_context = {
'css_files': [
'_static/theme_overrides.css'
]
}
else:
html_context = {
'css_files': [
'//media.readthedocs.org/css/sphinx_rtd_theme.css',
'//media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/theme_overrides.css'
]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pybind11doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': '\DeclareUnicodeCharacter{00A0}{}',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pybind11.tex', 'pybind11 Documentation',
'Wenzel Jakob', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = 'pybind11-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pybind11', 'pybind11 Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pybind11', 'pybind11 Documentation',
author, 'pybind11', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
primary_domain = 'cpp'
highlight_language = 'cpp'
def generate_doxygen_xml(app):
build_dir = os.path.join(app.confdir, '.build')
if not os.path.exists(build_dir):
os.mkdir(build_dir)
try:
subprocess.call(['doxygen', '--version'])
retcode = subprocess.call(['doxygen'], cwd=app.confdir)
if retcode < 0:
sys.stderr.write("doxygen error code: {}\n".format(-retcode))
except OSError as e:
sys.stderr.write("doxygen execution failed: {}\n".format(e))
def setup(app):
"""Add hook for building doxygen xml when needed"""
app.connect("builder-inited", generate_doxygen_xml)
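# With this hook registered, a Sphinx build (e.g. `make html`) first invokes
# Doxygen in the conf directory (its XML output is assumed to land in
# .build/doxygenxml/, matching breathe_projects above), and breathe then
# resolves the C++ documentation directives against that XML.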
|
the-stack_0_21907 | #!/usr/bin/env python
'''
Created on Apr 29, 2012
'''
from __future__ import division
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Apr 29, 2012"
import os
import glob
from fabric.api import local, lcd
from fabric.state import env
def make_doc():
with lcd("docs"):
local("sphinx-apidoc -o . -f ../pyhull")
local("rm pyhull*tests.rst")
for f in glob.glob("docs/*.rst"):
if f.startswith('docs/pyhull') and f.endswith('rst'):
newoutput = []
suboutput = []
subpackage = False
with open(f, 'r') as fid:
for line in fid:
clean = line.strip()
if clean == "Subpackages":
subpackage = True
if not subpackage and not clean.endswith("tests"):
newoutput.append(line)
else:
if not clean.endswith("tests"):
suboutput.append(line)
if clean.startswith("pyhull") and not clean.endswith("tests"):
newoutput.extend(suboutput)
subpackage = False
suboutput = []
with open(f, 'w') as fid:
fid.write("".join(newoutput))
local("make html")
local("cp favicon.png _build/html/static")
def publish():
local("python setup.py release")
def test():
local("nosetests")
def setver():
from pyhull import __version__
local("sed s/version=.*,/version=\\\"{}\\\",/ setup.py > newsetup".format(__version__))
local("mv newsetup setup.py")
def release():
setver()
test()
make_doc()
publish()
|
the-stack_0_21911 | import sublime
import sublime_plugin
import re
import sys
class TableToOutlineCommand(sublime_plugin.TextCommand):
def run(self, edit):
# array for headers
headerIndex = []
headerElement = []
headerAll = ''
# array for contents
content = []
contentAll = ''
# getting copied area. this "sel_area" is array
sel_area = self.view.sel()
# if there is no selected area or are more than 2 selected area
if (sel_area[0].empty() or len(sel_area) > 1):
sublime.message_dialog("Select any area.")
# if there is on selected area
else:
# store selected area to "region_text" which type is string.
region_text = self.view.substr(sel_area[0])
# split by '\n' into arry_data
arry_data = region_text.split('\n')
# processing headers part
h1 = TableHeaders(arry_data)
headerStr = h1.getHeaderString()
headerIndex = h1.getIndexHeaderSeparator()
headerElement = h1.getHeaderElement(headerStr,headerIndex)
headerAll = h1.makeHeader(headerElement)
print ("headerStr: ",headerStr)
print ("headerIndex: ",headerIndex)
for i in headerElement:
print ("header",i)
print ("headerAll: ",headerAll)
# processing contents part
c1 = TableContents(arry_data)
content = c1.makeContentElement()
contentAll = c1.changeContentFromTableToMarkdown(content)
print ("contentAll: ",contentAll)
# replacing selected area and inserting contents
self.view.replace(edit,sel_area[0],headerAll)
self.view.insert(edit,sel_area[0].end(),contentAll)
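# Illustrative example (hypothetical input): selecting a Markdown table such as
#
#   | Fruit  | Color  |
#   |--------|--------|
#   | apple  | red    |
#   | banana | yellow |
#
# produces an outline along these lines (the command indents with tab
# characters; spaces are shown here for readability):
#
#   + Fruit
#       + Color
#   - apple
#       - red
#   - banana
#       - yellow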
class TableHeaders:
def __init__(self,arry):
self.arry = arry
def getHeaderString(self):
return self.arry[0]
# get index of Header separators
def getIndexHeaderSeparator(self):
self.index_of_separator = []
self.index_of_escaped_separator = []
# detecting the index of '|'
self.index = -1
while True:
self.index = self.arry[0].find('|', self.index + 1)
if self.index == -1:
break
self.index_of_separator.append(self.index)
# print ("start=%d" % self.index )
print("self.index_of_separator: ",self.index_of_separator)
# detecting the index of '\|'
self.index = -1
while True:
self.index = self.arry[0].find('\|', self.index + 1)
if self.index == -1:
break
self.index_of_escaped_separator.append(self.index + 1)
# print ("start=%d" % self.index )
print("self.index_of_escaped_separator: ",self.index_of_escaped_separator)
self.a = set(self.index_of_separator)
self.b = set(self.index_of_escaped_separator)
self.diff = self.a.difference(self.b)
print("difference: ",self.a.difference(self.b))
return sorted(self.diff)
# get Header Element with argument of headerIndex
def getHeaderElement(self,myHeaderStr,myHeaderIndex):
myHeaderElement = []
for i in range(0,len(myHeaderIndex)-1):
index1 = myHeaderIndex[i]+1
index2 = myHeaderIndex[i+1]
myString = myHeaderStr[index1:index2].strip()
myHeaderElement.append(myString)
return myHeaderElement
# make Header texts
def makeHeader(self,myHeaderElement):
myHeader = ''
for i in range(0,len(myHeaderElement)):
myHeader += "\t"*i+"+ "+myHeaderElement[i]+"\n"
return myHeader
class TableContents:
def __init__(self,myArry):
self.arry = myArry
def makeContentElement(self):
for i in range(0,len(self.arry)):
print("self.arry: ",self.arry[i])
return self.arry[2:]
def changeContentFromTableToMarkdown(self,myContent):
self.myContent = myContent
self.myContentAll = ''
for i in self.myContent:
# splitting i by '|' to splittedContent
splittedContent = re.split(r'\|',i)[1:-1]
for j in range(0,len(splittedContent)):
if(re.match(r"^ {1,}",splittedContent[j])):
self.myContentAll += '\t'*j+'-'+splittedContent[j]+'\n'
else:
self.myContentAll += '\t'*j+'- '+splittedContent[j]+'\n'
return self.myContentAll |
the-stack_0_21912 | #!/usr/bin/env python
# Copyright (c) 2011-2019, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Gage adjustment
^^^^^^^^^^^^^^^
Concept
-------
The objective of this module is the adjustment of radar-based rainfall
estimates by rain gage observations. However, this module could also be
applied to adjust satellite rainfall by rain gage observations, remotely
sensed soil moisture patterns by ground truthing moisture sensors, or any
dense spatial point pattern which could be adjusted by sparse point
measurements (ground truth).
Basically, we only need two data sources:
- point observations (e.g. rain gage observations)
- set of (potentially irregular) unadjusted point values
(e.g. remotely sensed rainfall)
:cite:`Goudenhoofdt2009` provide an excellent overview of adjustment
procedures. The general idea is that we quantify the error of the
remotely sensed rainfall at the rain gage locations, assuming the rain
gage observation to be accurate.
The error can be assumed to be purely additive
(:class:`~wradlib.adjust.AdjustAdd`), purely multiplicative
(:class:`~wradlib.adjust.AdjustMultiply`, :class:`~wradlib.adjust.AdjustMFB`)
or a mixture of both (:class:`~wradlib.adjust.AdjustMixed`).
If the error is assumed to be heterogeneous in space
(:class:`~wradlib.adjust.AdjustAdd`, :class:`~wradlib.adjust.AdjustMultiply`,
:class:`~wradlib.adjust.AdjustMixed`), the error at the rain gage locations is
interpolated to the radar bin locations and then used to adjust (correct)
the raw radar rainfall estimates. In case of the AdjustMFB approach, though,
the multiplicative error is assumed to be homogeneous in space.
Quick start
-----------
The basic procedure consists of creating an adjustment object from the class
you want to use for adjustment. After that, you can call the object with the
actual data that is to be adjusted. The following example is using the
additive error model with default settings. ``obs_coords`` and
``raw_coords`` represent arrays with coordinate pairs for the gage
observations and the radar bins, respectively. ``obs`` and ``raw`` are
arrays containing the actual data::
adjuster = AdjustAdd(obs_coords, raw_coords)
adjusted = adjuster(obs, raw)
Both ``obs`` and ``raw`` need to be flat (1-dimensional) arrays of shape (n,)
that have the same length as the ``obs_coords`` and ``raw_coords`` arrays,
respectively.
The user can specify the approach that should be used to interpolate the error
in space, as well as the keyword arguments which control the behaviour of the
interpolation approach. For this purpose, all interpolation classes from the
:mod:`wradlib.ipol` module are available and can be passed by using the
``ipclass`` argument. The default interpolation class is
Inverse Distance Weighting (:class:`~wradlib.ipol.Idw`). If you want to use
e.g. linear barycentric interpolation::
import wradlib.ipol as ipol
adjuster = AdjustAdd(obs_coords, raw_coords, ipclass=ipol.Linear)
adjusted = adjuster(obs, raw)
Warning
-------
Be aware that there are a lot of control parameters that can dramatically
influence the behaviour of the adjustment (which gauges are considered,
how is an error interpolation carried out, ...). Read the docs carefully
and try to experiment with the effects of the different control parameters.
There might be situations in which the algorithm decides - based on the
control parameter - not to do an adjustment and just return the unadjusted
values.
Cross validation
----------------
Another helpful feature is an easy-to-use method for leave-one-out
cross-validation :cite:`Cross-validation`. Cross validation is a standard
procedure for verifying rain gage adjustment or interpolation procedures. You
can start the cross validation in the same way as you start the actual
adjustment, however, you call the :meth:`~wradlib.adjust.AdjustBase.xvalidate`
method instead. The result of the cross validation are pairs of observation
and the corresponding adjustment result at the observation location. Using the
:mod:`wradlib.verify` module, you can compute error metrics for the cross
validation results::
adjuster = AdjustAdd(obs_coords, raw_coords)
observed, estimated = adjuster.xvalidate(obs, raw)
from wradlib.verify import ErrorMetrics
metrics = ErrorMetrics(observed, estimated)
metrics.report()
.. autosummary::
:nosignatures:
:toctree: generated/
AdjustBase
AdjustMFB
AdjustMultiply
AdjustAdd
AdjustMixed
RawAtObs
GageOnly
AdjustNone
"""
import numpy as np
from scipy import spatial, stats
from wradlib import ipol, util
class AdjustBase(ipol.IpolBase):
"""The basic adjustment class that inherits to all other classes.
All methods except the :meth:`~wradlib.adjust.AdjustBase.__call__` method
are inherited to the following adjustment classes.
Parameters
----------
obs_coords : array of floats of shape (number of points, 2)
x and y coordinate pairs of observation locations (e.g. rain gauges).
raw_coords : array of floats of shape (number of points, 2)
x and y coordinate pairs of raw (unadjusted) radar field
nnear_raws : integer
Defaults to 9. This parameter controls the number of radar bins or
grid cells (in the neighbourhood of a rain gauge) which is used to
compute the value of the radar observation AT a rain gauge.
stat : string
Defaults to 'median'. Must be either 'mean', 'median', or 'best'.
This parameter controls the statistic that is used to compute the value
of the radar observation AT a rain gauge based on the neighbourhood
specified by parameter ``nnear_raws``.
mingages : integer
Defaults to 5. Minimum number of valid gages required for an
adjustment. If less valid gauges are available, the adjustment
procedure will return unadjusted raw values. If you do not want to use
this feature, you need to set ``mingages=0``.
minval : float
If the gage or radar observation is below this threshold, the location
will not be used for adjustment. For additive adjustment, this value
should be set to zero (default value). For multiplicative adjustment,
values larger than zero might be chosen in order to minimize
artifacts.
mfb_args : dictionary
**Only used for AdjustMFB** - This set of parameters controls how the
mean field bias is computed. Items of the dictionary are:
- *method*: string
defaults to 'linregr' which fits a regression line through observed
and estimated values and than gets the bias from the inverse of
the slope.
Other values: 'mean' or 'median' compute the mean or the median of
the ratios between gauge and radar observations.
- *minslope*, *minr*, *maxp*:
When using method='linregr', these parameters control whether a
linear regression turned out to be robust (minimum allowable slope,
          minimum allowable correlation, maximum allowable p-value). If the
regression result is not considered robust, no adjustment will
take place.
Ipclass : an interpolation class from :mod:`wradlib.ipol`
**Not used for AdjustMFB** - default value is
:class:`~wradlib.ipol.Idw` (Inverse Distance Weighting).
ipargs : keyword arguments to create an instance of ipclass
**Not used for AdjustMFB** - for :class:`~wradlib.ipol.Idw`, these
keyword arguments would e.g. be ``nnear`` or ``p``.
Examples
--------
See :ref:`/notebooks/multisensor/wradlib_adjust_example.ipynb`.
"""
def __init__(self, obs_coords, raw_coords,
nnear_raws=9, stat='median', mingages=5, minval=0.,
mfb_args=None, ipclass=ipol.Idw, **ipargs):
# Check arguments
if mfb_args is None:
mfb_args = dict(method="linregr", minslope=0.1,
minr=0.5, maxp=0.01)
assert mfb_args["method"] in ["mean", "median", "linregr"], \
"Argument mfb_args['method'] has to be one " \
"out of 'mean', 'median' or 'linregr'."
# These are the coordinates of the rain gage locations and
# the radar bin locations
self.obs_coords = self._make_coord_arrays(obs_coords)
self.raw_coords = self._make_coord_arrays(raw_coords)
# These are the general control parameters
# for all adjustment procedures
self.nnear_raws = nnear_raws
self.stat = stat
self.mingages = mingages
self.minval = minval
# Control parameters for specific adjustment procedures
# for AdjustMFB
self.mfb_args = mfb_args
# interpolation class and its keyword arguments
# ((needed for AdjustAdd, AdjustMultiply, AdjustMixed)
self.ipclass = ipclass
self.ipargs = ipargs
# create a default instance of interpolator
self.ip = ipclass(src=self.obs_coords, trg=self.raw_coords, **ipargs)
# This method will quickly retrieve the actual radar values
# at the gage locations
self.get_raw_at_obs = RawAtObs(self.obs_coords,
self.raw_coords,
nnear=nnear_raws,
stat=stat)
def _checkip(self, ix, targets):
"""INTERNAL: Return a revised instance of the Interpolator class.
        When an instance of an Adjust... class is created, an instance of the
        desired Interpolation class (argument ipclass) is created as attribute
        *self.ip*. However, this instance is only valid in case all
        observation points (attribute *self.obs_coords*) have valid
        observation-radar pairs. In case points are missing (or in case the
        instance is called in the course of cross validation), a new instance
        has to be created which considers the new constellation of
        observation-radar pairs.
This method computes and returns this new instance.
Parameters
----------
ix : array of integers
These are the indices of observation points with valid
observation-radar pairs
targets : array of floats of shape (number of target points, 2)
Target coordinates for the interpolation
Returns
-------
output : an instance of a class that inherited from
wradlib.ipol.IpolBase
"""
# first, set interpolation targets (default: the radar coordinates)
targets_default = False
if targets is None:
targets = self.raw_coords
targets_default = True
# second, compute inverse distance neighbours
if (not len(ix) == len(self.obs_coords)) or (not targets_default):
return self.ipclass(self.obs_coords[ix], targets, **self.ipargs)
else:
return self.ip
def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
"""Returns an array of ``raw`` values that are adjusted by ``obs``.
Parameters
----------
obs : flat (1-D) array of floats with shape (num gauges,)
These are the gage observations used for adjustment. This array
needs to be of the same length as the array "obs_coords" used to
initialize the adjustment object.
raw : flat (1-D) array of floats with shape (num radar cells,)
These are the raw (unadjusted) radar rainfall values. This array
needs to be of the same length as the array "raw_coords" used to
initialize the adjustment object.
targets : (INTERNAL - DO NOT USE)
Array of floats. Coordinate pairs for locations on which the final
adjustment product is interpolated
Defaults to None. In this case, the output locations will be
identical to the radar coordinates
rawatobs : (INTERNAL - DO NOT USE)
Array of floats. For internal use from AdjustBase.xvalidate only
(defaults to None)
ix : (INTERNAL - DO NOT USE)
Array of integers. For internal use from AdjustBase.xvalidate only
(defaults to None)
"""
pass
def _check_shape(self, obs, raw):
"""INTERNAL: Check consistency of the input data obs and raw with
the shapes of the coordinates
"""
# TODO
pass
def _get_valid_pairs(self, obs, raw):
"""INTERNAL: Helper method to identify valid obs-raw pairs
"""
# checking input shape consistency
self._check_shape(obs, raw)
# radar values at gage locations
rawatobs = self.get_raw_at_obs(raw, obs)
# check where both gage and radar observations are valid
ix = np.intersect1d(util._idvalid(obs, minval=self.minval),
util._idvalid(rawatobs, minval=self.minval))
return rawatobs, ix
def xvalidate(self, obs, raw):
"""Leave-One-Out Cross Validation, applicable to all gage adjustment
classes.
This method will be inherited to other Adjust classes. It should thus
be applicable to all adjustment procedures without any modification.
This way, the actual adjustment procedure has only to be defined *once*
in the :meth:`~wradlib.adjust.AdjustBase.__call__` method.
The output of this method can be evaluated by using the
`verify.ErrorMetrics` class.
Parameters
----------
obs : array of floats
raw : array of floats
Returns
-------
obs : array of floats
valid observations at those locations which have a valid radar
observation
estatobs : array of floats
estimated values at the valid observation locations
"""
rawatobs, ix = self._get_valid_pairs(obs, raw)
self.get_raws_directly_at_obs = RawAtObs(self.obs_coords,
self.raw_coords, nnear=1)
raws_directly_at_obs = self.get_raws_directly_at_obs(raw)
ix = np.intersect1d(ix, util._idvalid(raws_directly_at_obs,
minval=self.minval))
# Container for estimation results at the observation location
estatobs = np.zeros(obs.shape) * np.nan
# check whether enough gages remain for adjustment
if len(ix) <= (self.mingages - 1):
# not enough gages for cross validation: return empty arrays
return obs, estatobs
# Now iterate over valid pairs
for i in ix:
# Pass all valid pairs except ONE which you pass as target
ix_adjust = np.setdiff1d(ix, [i])
estatobs[i] = self.__call__(obs, raws_directly_at_obs[i],
self.obs_coords[i].reshape((1, -1)),
rawatobs, ix_adjust)
return obs, estatobs
class AdjustAdd(AdjustBase):
"""Gage adjustment using an additive error model.
First, an instance of AdjustAdd has to be created. Calling this instance
    then does the actual adjustment. The motivation behind this is performance.
In case the observation points are always the same for different time
steps, the computation of neighbours and inverse distance weights only
needs to be performed once.
AdjustAdd automatically takes care of invalid gage or radar observations
(e.g. NaN, Inf or other typical missing data flags such as -9999).
However, in case e.g. the observation data contains missing values, the
computation of the inverse distance weights needs to be repeated in
:meth:`~wradlib.adjust.AdjustAdd.__call__` which is at the expense of
performance.
Note
----
Inherits from :class:`wradlib.adjust.AdjustBase`
For a complete overview of parameters for the initialisation of adjustment
objects, as well as an extensive example, please see
:class:`wradlib.adjust.AdjustBase`.
Returns
-------
output : array of adjusted radar values
"""
def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
"""Returns an array of ``raw`` values that are adjusted by ``obs``.
Calling an adjustment object works the same for all adjustment classes.
Detailed instructions on the parameters ``obs`` and ``raw`` are
provided in :meth:`wradlib.adjust.AdjustBase.__call__`.
"""
# ----------------GENERIC PART FOR MOST __call__ methods---------------
if (ix is None) or (rawatobs is None):
# Check for valid observation-radar pairs in case this method has
# not been called from self.xvalidate
rawatobs, ix = self._get_valid_pairs(obs, raw)
if len(ix) < self.mingages:
# Not enough valid gages for adjustment? - return unadjusted data
return raw
# Get new Interpolator instance if necessary
ip = self._checkip(ix, targets)
# -----------------THIS IS THE ACTUAL ADJUSTMENT APPROACH--------------
# The error is a difference
error = obs[ix] - rawatobs[ix]
# interpolate the error field
iperror = ip(error)
# add error field to raw and make sure no negatives occur
return np.where((raw + iperror) < 0., 0., raw + iperror)
class AdjustMultiply(AdjustBase):
"""Gage adjustment using a multiplicative error model
First, an instance of AdjustMultiply has to be created. Calling this
instance then does the actual adjustment. The motivation behind this
performance. In case the observation points are always the same for
different time steps, the computation of neighbours and inverse distance
weights only needs to be performed once during initialisation.
AdjustMultiply automatically takes care of invalid gage or radar
observations (e.g. NaN, Inf or other typical missing data flags such as
-9999). However, in case e.g. the observation data contain missing values,
the computation of the inverse distance weights needs to be repeated in
:meth:`~wradlib.adjust.AdjustMultiply.__call__` which is at the expense of
performance.
Note
----
Inherits from :class:`wradlib.adjust.AdjustBase`
For a complete overview of parameters for the initialisation of adjustment
objects, as well as an extensive example, please see
:meth:`wradlib.adjust.AdjustBase`.
Returns
-------
output : array of adjusted radar values
"""
def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
"""Returns an array of ``raw`` values that are adjusted by ``obs``.
Calling an adjustment object works the same for all adjustment classes.
Detailed instructions on the parameters ``obs`` and ``raw`` are
provided in :meth:`wradlib.adjust.AdjustBase.__call__`.
"""
# ----------------GENERIC PART FOR MOST __call__ methods---------------
if (ix is None) or (rawatobs is None):
# Check for valid observation-radar pairs in case this method has
# not been called from self.xvalidate
rawatobs, ix = self._get_valid_pairs(obs, raw)
if len(ix) < self.mingages:
# Not enough valid gages for adjustment? - return unadjusted data
return raw
# Get new Interpolator instance if necessary
ip = self._checkip(ix, targets)
# -----------------THIS IS THE ACTUAL ADJUSTMENT APPROACH--------------
# computing the error
error = obs[ix] / rawatobs[ix]
# interpolate error field
iperror = ip(error)
# multiply error field with raw
return iperror * raw
class AdjustMixed(AdjustBase):
"""Gage adjustment using a mixed error model (additive and multiplicative).
The mixed error model assumes that you have both a multiplicative and an
additive error term. The intention is to overcome the drawbacks of the
purely additive and multiplicative approaches (see
:class:`~wradlib.adjust.AdjustAdd` and
:class:`~wradlib.adjust.AdjustMultiply`). The formal representation of the
error model according to :cite:`Pfaff2010` is:
.. math::
        R_{gage} = R_{radar} \\cdot (1 + \\delta) + \\epsilon
:math:`\\delta` and :math:`\\epsilon` have to be assumed to be independent
and normally distributed. The present implementation is based on a Least
Squares estimation of :math:`\\delta` and :math:`\\epsilon` for each rain
gage location. :math:`\\delta` and :math:`\\epsilon` are then interpolated
and used to correct the radar rainfall field.
The least squares implementation uses the equation for the error model plus
the condition to minimize (:math:`\\delta^2 + \\epsilon^2`) for each gage
location. The idea behind this is that :math:`\\epsilon` dominates the
adjustment for small deviations between radar and gage while
:math:`\\delta` dominates in case of large deviations.
**Usage**:
First, an instance of AdjustMixed has to be created. Calling this instance
then does the actual adjustment. The motivation behind this is performance.
In case the observation points are always the same for different time
steps, the computation of neighbours and inverse distance weights only
needs to be performed once during initialisation.
AdjustMixed automatically takes care of invalid gage or radar observations
(e.g. NaN, Inf or other typical missing data flags such as -9999).
However, in case e.g. the observation data contain missing values, the
computation of the inverse distance weights needs to be repeated in
:func:`~wradlib.adjust.AdjustMixed.__call__` which is at the expense of
performance.
Note
----
Inherits from :class:`wradlib.adjust.AdjustBase`
For a complete overview of parameters for the initialisation of adjustment
objects, as well as an extensive example, please see
:class:`wradlib.adjust.AdjustBase`.
Returns
-------
output : array of adjusted radar values
"""
def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
"""Returns an array of ``raw`` values that are adjusted by ``obs``.
Calling an adjustment object works the same for all adjustment classes.
Detailed instructions on the parameters ``obs`` and ``raw`` are
provided in :meth:`wradlib.adjust.AdjustBase.__call__`.
"""
# ----------------GENERIC PART FOR MOST __call__ methods---------------
if (ix is None) or (rawatobs is None):
# Check for valid observation-radar pairs in case this method has
# not been called from self.xvalidate
rawatobs, ix = self._get_valid_pairs(obs, raw)
if len(ix) < self.mingages:
# Not enough valid gages for adjustment? - return unadjusted data
return raw
# Get new Interpolator instance if necessary
ip = self._checkip(ix, targets)
# -----------------THIS IS THE ACTUAL ADJUSTMENT APPROACH--------------
# computing epsilon and delta from least squares
epsilon = (obs[ix] - rawatobs[ix]) / (rawatobs[ix] ** 2 + 1.)
delta = ((obs[ix] - epsilon) / rawatobs[ix]) - 1.
# interpolate error fields
ipepsilon = ip(epsilon)
ipdelta = ip(delta)
# compute adjusted radar rainfall field
return (1. + ipdelta) * raw + ipepsilon
class AdjustMFB(AdjustBase):
"""Multiplicative gage adjustment using *one* correction factor for the \
entire domain.
This method is also known as the Mean Field Bias correction.
Note
----
Inherits from :class:`wradlib.adjust.AdjustBase`
For a complete overview of parameters for the initialisation of adjustment
objects, as well as an extensive example, please see
:class:`wradlib.adjust.AdjustBase`.
Returns
-------
output : array of adjusted radar values
"""
def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
"""Returns an array of ``raw`` values that are adjusted by ``obs``.
Calling an adjustment object works the same for all adjustment classes.
Detailed instructions on the parameters ``obs`` and ``raw`` are
provided in :meth:`wradlib.adjust.AdjustBase.__call__`.
"""
# ----------------GENERIC PART FOR MOST __call__ methods---------------
if (ix is None) or (rawatobs is None):
# Check for valid observation-radar pairs in case this method has
# not been called from self.xvalidate
rawatobs, ix = self._get_valid_pairs(obs, raw)
if len(ix) < self.mingages:
# Not enough valid gages for adjustment? - return unadjusted data
return raw
# # Get new Interpolator instance if necessary
# ip = self._checkip(ix, targets)
# -----------------THIS IS THE ACTUAL ADJUSTMENT APPROACH--------------
# compute ratios for each valid observation point
ratios = np.ma.masked_invalid(obs[ix] / rawatobs.ravel()[ix])
if len(np.where(np.logical_not(ratios.mask))[0]) < self.mingages:
# Not enough valid pairs of raw and obs
return raw
if self.mfb_args["method"] == "mean":
corrfact = np.mean(ratios)
elif self.mfb_args["method"] == "median":
corrfact = np.median(ratios)
elif self.mfb_args["method"] == "linregr":
corrfact = 1.
ix_ = np.where(np.logical_not(ratios.mask))[0]
x = obs[ix][ix_]
y = rawatobs[ix][ix_]
# check whether we should adjust or not
try:
slope, intercept, r, p, stderr = stats.linregress(x, y)
except Exception:
slope, r, p = 0, 0, np.inf
if (slope > self.mfb_args["minslope"]) and \
(r > self.mfb_args["minr"]) and \
(p < self.mfb_args["maxp"]):
x = x[:, np.newaxis]
try:
slope, _, _, _ = np.linalg.lstsq(x, y)
if not slope[0] == 0:
corrfact = 1. / slope[0]
except Exception:
# no correction if linear regression fails
pass
if type(corrfact) == np.ma.core.MaskedConstant:
corrfact = 1.
return corrfact * raw
class AdjustNone(AdjustBase):
"""Same behaviour as the other adjustment classes, but simply returns the \
unadjusted data.
This class can be used for benchmark verification experiments as a control
for unadjusted data.
Note
----
Inherits from :class:`wradlib.adjust.AdjustBase`
For a complete overview of parameters for the initialisation of adjustment
objects, as well as an extensive example, please see
:class:`wradlib.adjust.AdjustBase`.
Returns
-------
output : array of unadjusted radar values
"""
def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
"""Returns an array of ``raw`` values that are adjusted by ``obs``.
Calling an adjustment object works the same for all adjustment classes.
Detailed instructions on the parameters ``obs`` and ``raw`` are
provided in :meth:`wradlib.adjust.AdjustBase.__call__`.
"""
# ----------------GENERIC PART FOR MOST __call__ methods---------------
if (ix is None) or (rawatobs is None):
# Check for valid observation-radar pairs in case this method has
# not been called from self.xvalidate
rawatobs, ix = self._get_valid_pairs(obs, raw)
if len(ix) < self.mingages:
# Not enough valid gages for adjustment? - return unadjusted data
return raw
return raw
class GageOnly(AdjustBase):
"""Same behaviour as the other adjustment classes, but returns an \
interpolation of rain gage observations
First, an instance of GageOnly has to be created. Calling this instance
    then does the actual adjustment. The motivation behind this is performance.
In case the observation points are always the same for different time
steps, the computation of neighbours and inverse distance weights only
needs to be performed once during initialisation.
GageOnly automatically takes care of invalid gage or radar observations
(e.g. NaN, Inf or other typical missing data flags such as -9999).
However, in case e.g. the observation data contain missing values, the
computation of the inverse distance weights needs to be repeated in
:func:`~wradlib.adjust.GageOnly.__call__` which is at the expense of
performance.
Note
----
Inherits from :class:`wradlib.adjust.AdjustBase`
For a complete overview of parameters for the initialisation of adjustment
objects, as well as an extensive example, please see
:class:`wradlib.adjust.AdjustBase`.
Returns
-------
output : array of adjusted radar values
"""
def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
"""Returns an array of ``raw`` values that are adjusted by ``obs``.
Calling an adjustment object works the same for all adjustment classes.
Detailed instructions on the parameters ``obs`` and ``raw`` are
provided in :meth:`wradlib.adjust.AdjustBase.__call__`.
"""
# ----------------GENERIC PART FOR MOST __call__ methods---------------
if (ix is None) or (rawatobs is None):
# Check for valid observation-radar pairs in case this method has
# not been called from self.xvalidate
rawatobs, ix = self._get_valid_pairs(obs, raw)
if len(ix) < self.mingages:
# Not enough valid gages for adjustment? - return unadjusted data
return raw
# Get new Interpolator instance if necessary
ip = self._checkip(ix, targets)
# -----------------THIS IS THE ACTUAL ADJUSTMENT APPROACH--------------
# interpolate gage observations
return ip(obs[ix])
class RawAtObs():
"""Get the raw values in the neighbourhood of the observation points
Parameters
----------
obs_coords : array of float
coordinate pairs of observations points
raw_coords : array of float
coordinate pairs of raw (unadjusted) field
nnear: integer
number of neighbours which should be considered in the vicinity of each
point in obs
stat: string
function name
"""
def __init__(self, obs_coords, raw_coords, nnear=9, stat='median'):
self.statfunc = _get_statfunc(stat)
self.raw_ix = _get_neighbours_ix(obs_coords, raw_coords, nnear)
def __call__(self, raw, obs=None):
"""
Returns the values of raw at the observation locations
Parameters
----------
raw : array of float
raw values
"""
# get the values of the raw neighbours of obs
raw_neighbs = raw[self.raw_ix]
# and summarize the values of these neighbours
# by using a statistics option
# (only needed in case nnear > 1, i.e. multiple neighbours
# per observation location)
if raw_neighbs.ndim > 1:
return self.statfunc(obs, raw_neighbs)
else:
return raw_neighbs
def _get_neighbours_ix(obs_coords, raw_coords, nnear):
"""Returns ``nnear`` neighbour indices per ``obs_coords`` coordinate pair
Parameters
----------
obs_coords : array of float of shape (num_points,ndim)
in the neighbourhood of these coordinate pairs we look for neighbours
raw_coords : array of float of shape (num_points,ndim)
from these coordinate pairs the neighbours are selected
nnear : integer
number of neighbours to be selected per coordinate pair of
``obs_coords``
"""
# plant a tree
tree = spatial.cKDTree(raw_coords)
# return nearest neighbour indices
return tree.query(obs_coords, k=nnear)[1]
def _get_statfunc(funcname):
"""Returns a function that corresponds to parameter ``funcname``
Parameters
----------
funcname : string
a name of a numpy function OR another option known by _get_statfunc
Potential options: 'mean', 'median', 'best'
"""
try:
# first try to find a numpy function which corresponds to <funcname>
func = getattr(np, funcname)
def newfunc(x, y):
return func(y, axis=1)
except Exception:
# then try to find a function in this module with name funcname
if funcname == 'best':
newfunc = best
else:
# if no function can be found, raise an Exception
raise NameError('Unknown function name option: ' + funcname)
return newfunc
def best(x, y):
"""Find the values of y which corresponds best to x
If x is an array, the comparison is carried out for each element of x
Parameters
----------
x : float or 1-d array of float
y : array of float
Returns
-------
output : 1-d array of float with length len(y)
"""
if type(x) == np.ndarray:
assert x.ndim == 1, 'x must be a 1-d array of floats or a float.'
assert len(x) == len(y), 'Length of x and y must be equal.'
if type(y) == np.ndarray:
assert y.ndim <= 2, 'y must be 1-d or 2-d array of floats.'
else:
raise ValueError('y must be 1-d or 2-d array of floats.')
x = np.array(x).reshape((-1, 1))
if y.ndim == 1:
y = np.array(y).reshape((1, -1))
axis = None
else:
axis = 1
return y[np.arange(len(y)), np.argmin(np.abs(x - y), axis=axis)]
if __name__ == '__main__':
print('wradlib: Calling module <adjust> as main...')
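    # A minimal, self-contained usage sketch (synthetic coordinates and rainfall
    # values, not part of the original module): additive adjustment of a flat
    # radar field with five gauges.
    _obs_coords = np.array([[2., 2.], [2., 8.], [5., 5.], [8., 2.], [8., 8.]])
    _raw_coords = np.array([[x, y] for x in np.arange(0., 10.)
                            for y in np.arange(0., 10.)])
    _obs = np.array([1.2, 1.4, 2.0, 0.8, 1.1])  # gauge observations
    _raw = np.ones(len(_raw_coords))            # unadjusted radar field
    _adjuster = AdjustAdd(_obs_coords, _raw_coords, nnear_raws=1)
    print('adjusted field mean: {:.3f}'.format(_adjuster(_obs, _raw).mean()))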
|
the-stack_0_21914 | # -*- coding: utf-8 -*-
"""
# @file name : model_trainer.py
# @author : https://github.com/zjgbz
# @date : 2020-08-03
# @brief      : model trainer class
"""
import torch
import numpy as np
from collections import Counter
from tools.mixup import mixup_criterion, mixup_data
from sklearn.metrics import roc_auc_score, precision_recall_fscore_support
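# Usage sketch (the cfg, logger, loader, model and loss objects below are
# hypothetical placeholders, not defined in this file):
#
#     for epoch in range(cfg.max_epoch):
#         train_stats = ModelTrainer.train(train_loader, model, loss_f, optimizer,
#                                          scheduler, epoch, device, cfg, logger)
#         valid_stats = ModelTrainer.valid(valid_loader, model, loss_f, device)
#         scheduler.step()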
class ModelTrainer(object):
@staticmethod
def train(data_loader, model, loss_f, optimizer, scheduler, epoch_idx, device, cfg, logger):
model.train()
class_num = data_loader.dataset.cls_num
conf_mat = np.zeros((class_num, class_num))
loss_sigma = []
loss_mean = 0
acc_avg = 0
path_error = []
label_list = []
pred_list = []
for i, data in enumerate(data_loader):
# _, labels = data
inputs, labels, path_imgs = data
label_list.extend(labels.tolist())
inputs, labels = inputs.to(device), labels.to(device)
# mixup
if cfg.mixup:
mixed_inputs, label_a, label_b, lam = mixup_data(inputs, labels, cfg.mixup_alpha, device)
inputs = mixed_inputs
# forward & backward
outputs = model(inputs)
optimizer.zero_grad()
            # compute the loss
if cfg.mixup:
loss = mixup_criterion(loss_f, outputs.cpu(), label_a.cpu(), label_b.cpu(), lam)
else:
loss = loss_f(outputs.cpu(), labels.cpu())
loss.backward()
optimizer.step()
            # track the running loss
loss_sigma.append(loss.item())
loss_mean = np.mean(loss_sigma)
#
_, predicted = torch.max(outputs.data, 1)
pred_list.extend(predicted.tolist())
for j in range(len(labels)):
cate_i = labels[j].cpu().numpy()
pre_i = predicted[j].cpu().numpy()
conf_mat[cate_i, pre_i] += 1.
if cate_i != pre_i:
                    path_error.append((cate_i, pre_i, path_imgs[j]))  # record the misclassified sample
acc_avg = conf_mat.trace() / conf_mat.sum()
# AUROC
labels_array = np.asarray(label_list)
pred_array = np.asarray(pred_list)
# auroc = roc_auc_score(labels_array, pred_array)
prec_macro, recall_macro, F1_macro, _ = precision_recall_fscore_support(labels_array, pred_array, average = 'macro', zero_division = 0)
prec_micro, recall_micro, F1_micro, _ = precision_recall_fscore_support(labels_array, pred_array, average = 'micro', zero_division = 0)
            # print training info every log_interval iterations
if i % cfg.log_interval == cfg.log_interval - 1:
# logger.info(
# (f"Training: Epoch[{epoch_idx + 1:0>3}/{cfg.max_epoch:0>3}] Iteration[{i + 1:0>3}/{len(data_loader):0>3}] "
# f"Loss: {loss_mean:.4f} Acc:{acc_avg:.2%} Precision(macro):{prec_macro:.4f} Recall(macro):{recall_macro:.4f} "
# f"F1(macro):{F1_macro:.4f} Precision(micro):{prec_micro:.4f} Recall(micro):{recall_micro:.4f} F1(micro):{F1_micro:.4f}")
# )
logger.info(
(f"Training: Epoch[{epoch_idx + 1:0>3}/{cfg.max_epoch:0>3}] Iteration[{i + 1:0>3}/{len(data_loader):0>3}] "
f"Loss: {loss_mean:.4f} Acc:{acc_avg:.2%} F1(macro):{F1_macro:.4f}")
)
logger.info("epoch:{} sampler: {}".format(epoch_idx, Counter(label_list)))
return loss_mean, acc_avg, prec_macro, recall_macro, F1_macro, prec_micro, recall_micro, F1_micro, conf_mat, path_error
# return loss_mean, acc_avg, conf_mat, path_error
@staticmethod
def valid(data_loader, model, loss_f, device):
model.eval()
class_num = data_loader.dataset.cls_num
conf_mat = np.zeros((class_num, class_num))
loss_sigma = []
path_error = []
label_list = []
pred_list = []
for i, data in enumerate(data_loader):
with torch.no_grad():
inputs, labels, path_imgs = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = model(inputs)
loss = loss_f(outputs.cpu(), labels.cpu())
                # update the confusion matrix
_, predicted = torch.max(outputs.data, 1)
for j in range(len(labels)):
cate_i = labels[j].cpu().numpy()
pre_i = predicted[j].cpu().numpy()
conf_mat[cate_i, pre_i] += 1.
if cate_i != pre_i:
                        path_error.append((cate_i, pre_i, path_imgs[j]))  # record the misclassified sample
                # track the running loss
loss_sigma.append(loss.item())
label_list.extend(labels.tolist())
pred_list.extend(predicted.tolist())
# print(f"batch {i} completed.")
acc_avg = conf_mat.trace() / conf_mat.sum()
# AUROC
# print(len(label_list), len(pred_list))
labels_array = np.asarray(label_list)
pred_array = np.asarray(pred_list)
# auroc = roc_auc_score(labels_array, pred_array)
prec_macro, recall_macro, F1_macro, _ = precision_recall_fscore_support(labels_array, pred_array, average = 'macro', zero_division = 0)
prec_micro, recall_micro, F1_micro, _ = precision_recall_fscore_support(labels_array, pred_array, average = 'micro', zero_division = 0)
return np.mean(loss_sigma), acc_avg, prec_macro, recall_macro, F1_macro, prec_micro, recall_micro, F1_micro, conf_mat, path_error |
the-stack_0_21916 | from random import choice, shuffle
from typing import List, Tuple
from django.templatetags.static import static
from data.pictures import PICTURES, PicturesTopic
from game.constants import QUESTIONS_PER_GAME, ANSWERS_PER_QUESTION
from game.models import Game, Question
def create_picture_questions(game: Game) -> List[Question]:
pictures_topic = choice(PICTURES) # nosec
questions = []
for _ in range(QUESTIONS_PER_GAME):
questions.append(
get_question_to_create(game, pictures_topic, questions)
)
return questions
def get_question_to_create(
game: Game,
pictures_topic: PicturesTopic,
existing_questions: List[Question],
) -> Question:
while True:
new_question_item = get_random_topic_item(pictures_topic)
new_question_link = static(
get_file_path(pictures_topic, new_question_item)
)
existing_question_links = [
question.question for question in existing_questions
]
if new_question_link not in existing_question_links:
answers = get_answers(pictures_topic, new_question_item)
return Question(
game=game,
question=new_question_link,
answer_words=answers,
correct_answer=new_question_item[0],
)
def get_file_path(topic: PicturesTopic, question_item: Tuple[str, str]) -> str:
topic_number = PICTURES.index(topic) + 1
dashed_topic = topic.name.replace(" ", "-")
question_item_number = topic.items.index(question_item) + 1
extension = question_item[1]
return (
f"picture-questions/{topic_number}-{dashed_topic}/"
f"{question_item_number}.{extension}"
)
def get_answers(
topic: PicturesTopic, question_item: Tuple[str, str]
) -> List[str]:
answers = [question_item[0]]
for _ in range(ANSWERS_PER_QUESTION - 1):
answers.append(get_wrong_answer(topic, answers))
shuffle(answers)
return answers
def get_wrong_answer(topic: PicturesTopic, existing_answers: List[str]) -> str:
while True:
random_item = get_random_topic_item(topic)
if random_item[0] not in existing_answers:
return random_item[0]
def get_random_topic_item(pictures_topic: PicturesTopic) -> Tuple[str, str]:
return choice(pictures_topic.items) # nosec
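# Usage sketch (assumes an existing ``game`` instance of game.models.Game):
#
#     questions = create_picture_questions(game)
#     Question.objects.bulk_create(questions)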
|
the-stack_0_21918 | # The following comments couldn't be translated into the new config version:
# E33 cm-2s-1
# mb
import FWCore.ParameterSet.Config as cms
# this is the configuration to model pileup in the low-luminosity phase
from SimGeneral.MixingModule.mixObjects_cfi import theMixObjects
from SimGeneral.MixingModule.mixPoolSource_cfi import *
from SimGeneral.MixingModule.digitizers_cfi import *
mix = cms.EDProducer("MixingModule",
digitizers = cms.PSet(theDigitizers),
LabelPlayback = cms.string(''),
maxBunch = cms.int32(3),
minBunch = cms.int32(-5), ## in terms of 25 ns
bunchspace = cms.int32(25), ## nsec
mixProdStep1 = cms.bool(False),
mixProdStep2 = cms.bool(False),
playback = cms.untracked.bool(False),
useCurrentProcessOnly = cms.bool(False),
input = cms.SecSource("EmbeddedRootSource",
nbPileupEvents = cms.PSet(
sigmaInel = cms.double(80.0),
Lumi = cms.double(2.8)
),
type = cms.string('poisson'),
sequential = cms.untracked.bool(False),
fileNames = FileNames
),
mixObjects = cms.PSet(theMixObjects)
)
|
the-stack_0_21919 |
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
dataset_path = "./train-data.csv"
column_names = ['Ind', 'Name', 'Location', 'Year', 'Kilometers_Driven',
'Fuel_Type', 'Transmission', 'Owner_Type', 'Mileage', 'Engine',
'Power', 'Seats', 'New_Price', 'Price']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t', skiprows=1, sep=",",
skipinitialspace=True)
dataset = raw_dataset.copy()
print ( dataset.head() )
dataset = dataset.drop(columns=['Ind', 'Name', 'Location', 'New_Price'])
print ( dataset.head() )
# To see a good description of the dataset
print ( dataset.describe() )
# Cleaning the data
# The dataset contains a few unknown values. Let’s find them and drop them.
dataset.isna().sum()
dataset = dataset.dropna()
dataset = dataset.reset_index(drop=True)
print ( dataset.head() )
dataset['Mileage'] = pd.Series([re.sub('[^.0-9]', '',
str(val)) for val in dataset['Mileage']], index = dataset.index)
dataset['Engine'] = pd.Series([re.sub('[^.0-9]', '',
str(val)) for val in dataset['Engine']], index = dataset.index)
dataset['Power'] = pd.Series([re.sub('[^.0-9]', '',
str(val)) for val in dataset['Power']], index = dataset.index)
# The prices are by default in INR Lakhs. So, we have to convert them to USD
dataset['Price'] = pd.Series([int(float(val)*1521.22) for val in dataset['Price']],
index = dataset.index)
print ( dataset.head() )
dataset = dataset.replace(r'^\s*$', np.nan, regex=True)
dataset.isna().sum()
dataset = dataset.dropna()
dataset = dataset.reset_index(drop=True)
print ( dataset.head() )
dataset['Mileage'] = pd.Series([int(float(str(val))*2.3521458)
for val in dataset['Mileage']], index = dataset.index)
dataset['Engine'] = pd.Series([float(str(val))
for val in dataset['Engine']], index = dataset.index)
## Lab 09 - TODO - for the column 'Power' in the dataset, convert it to a float
## Lab 09 - TODO - for the column 'Seats' in the dataset, convert it to a float
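## One possible completion of the two TODOs above (illustrative sketch only,
## not the official lab solution); it mirrors the Mileage/Engine conversions:
# dataset['Power'] = pd.Series([float(str(val)) for val in dataset['Power']],
#                              index = dataset.index)
# dataset['Seats'] = pd.Series([float(str(val)) for val in dataset['Seats']],
#                              index = dataset.index)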
## Lab 09 - TODO - create the column 'Miles_Driven' from the column
## 'Kilometers_Driven' by converting to a float and
## Multiplying by 0.621371, then convert to an integer so
## that we don't have small fractional values.
##
## Example of Conversion in just code
## x = "23.0" # A string, with a number in it.
## r = int(float(x)*0.621371)
## # Convert from string to float,
## # Km to Mi, then back to an integer.
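## Applying that conversion to the whole column might look like this
## (illustrative sketch only, not the official lab solution):
# dataset['Miles_Driven'] = pd.Series([int(float(str(val))*0.621371)
#                                      for val in dataset['Kilometers_Driven']],
#                                     index = dataset.index)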
dataset = dataset.drop(columns=['Kilometers_Driven'])
print ( dataset.head() )
dataset.to_csv(path_or_buf="new-car-data.csv")
## One-Hot the Fule_Type
print(dataset['Fuel_Type'].unique())
dataset['Fuel_Type'] = pd.Categorical(dataset['Fuel_Type'])
dfFuel_Type = pd.get_dummies(dataset['Fuel_Type'], prefix = 'Fuel_Type')
print ( dfFuel_Type.head() )
## One-Hot the Transmission
## Lab -09 - TODO - do a similar one-hot encoding for the values in
## the Transmission column.
## Lab -09 - TODO - do a similar one-hot encoding for the values in
## the Owner_Type column.
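## Illustrative sketch for the two TODOs above (not the official lab solution);
## it mirrors the Fuel_Type one-hot encoding:
# dataset['Transmission'] = pd.Categorical(dataset['Transmission'])
# dfTransmission = pd.get_dummies(dataset['Transmission'], prefix = 'Transmission')
# dataset['Owner_Type'] = pd.Categorical(dataset['Owner_Type'])
# dfOwner_Type = pd.get_dummies(dataset['Owner_Type'], prefix = 'Owner_Type')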
## Concat it all together
## TODO - when you get the 2 sections above working you will need:
#### dataset = pd.concat([dataset, dfFuel_Type, dfTransmission, dfOwner_Type], axis=1)
## instead of just the dfFule_type
dataset = pd.concat([dataset, dfFuel_Type], axis=1)
dataset = dataset.drop(columns=['Owner_Type', 'Transmission', 'Fuel_Type'])
print ( dataset.head() )
# Save the data again - take a look at it.
dataset.to_csv(path_or_buf="new-car-data2.csv")
############################### ###############################
# Plot some stuff.
############################### ###############################
dataset.plot(kind='scatter',x='Price',y='Year',color='blue')
plt.show()
## Lab - 09 - TODO - Plot Price v.s. Miles_Driven
## Lab - 09 - TODO - Plot Price v.s. Power
## Lab - 09 - TODO - Plot Price v.s. Mileage
## Lab - 09 - TODO - Plot Price v.s. Seats
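## One way to produce the four plots above once the corresponding columns have
## been converted (illustrative sketch only):
# for col in ['Miles_Driven', 'Power', 'Mileage', 'Seats']:
#     dataset.plot(kind='scatter', x='Price', y=col, color='blue')
#     plt.show()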
|
the-stack_0_21921 | ###############################################################################
# SpiralArmsPotential.py: class that implements the spiral arms potential
# from Cox and Gomez (2002)
#
# https://arxiv.org/abs/astro-ph/0207635
#
# Phi(r, phi, z) = -4*pi*G*H*rho0*exp(-(r-r0)/Rs)*sum(Cn/(Kn*Dn)*cos(n*gamma)*sech(Kn*z/Bn)^Bn)
# NOTE: Methods do not take array inputs.
###############################################################################
from __future__ import division
from .Potential import Potential, _APY_LOADED, \
check_potential_inputs_not_arrays
from galpy.util import bovy_conversion
import numpy as np
if _APY_LOADED:
from astropy import units
class SpiralArmsPotential(Potential):
"""Class that implements the spiral arms potential from (`Cox and Gomez 2002 <https://arxiv.org/abs/astro-ph/0207635>`__). Should be used to modulate an existing potential (density is positive in the arms, negative outside).
.. math::
        \\Phi(R, \\phi, z) = -4 \\pi GH \\,\\rho_0 exp \\left( -\\frac{R-r_{ref}}{R_s} \\right) \\sum{\\frac{C_n}{K_n D_n} \\,\\cos(n \\gamma) \\,\\mathrm{sech}^{B_n} \\left( \\frac{K_n z}{B_n} \\right)}
where
.. math::
        K_n &= \\frac{n N}{R \\sin(\\alpha)} \\\\
B_n &= K_n H (1 + 0.4 K_n H) \\\\
D_n &= \\frac{1 + K_n H + 0.3 (K_n H)^2}{1 + 0.3 K_n H} \\\\
and
.. math::
\\gamma = N \\left[\\phi - \\phi_{ref} - \\frac{\\ln(R/r_{ref})}{\\tan(\\alpha)} \\right]
The default of :math:`C_n=[1]` gives a sinusoidal profile for the potential. An alternative from `Cox and Gomez (2002) <https://arxiv.org/abs/astro-ph/0207635>`__ creates a density that behaves approximately as a cosine squared in the arms but is separated by a flat interarm region by setting
.. math::
        C_n = \\left[\\frac{8}{3 \\pi}\\,,\\frac{1}{2} \\,, \\frac{8}{15 \\pi}\\right]
"""
normalize= property() # turn off normalize
def __init__(self, amp=1, ro=None, vo=None, amp_units='density',
N=2, alpha=0.2, r_ref=1, phi_ref=0, Rs=0.3, H=0.125, omega=0, Cs=[1]):
"""
NAME:
__init__
PURPOSE:
initialize a spiral arms potential
INPUT:
:amp: amplitude to be applied to the potential (default: 1);
can be a Quantity with units of density. (:math:`amp = 4 \\pi G \\rho_0`)
:ro: distance scales for translation into internal units (default from configuration file)
:vo: velocity scales for translation into internal units (default from configuration file)
:N: number of spiral arms
:alpha: pitch angle of the logarithmic spiral arms in radians (can be Quantity)
:r_ref: fiducial radius where :math:`\\rho = \\rho_0` (:math:`r_0` in the paper by Cox and Gomez) (can be Quantity)
:phi_ref: reference angle (:math:`\\phi_p(r_0)` in the paper by Cox and Gomez) (can be Quantity)
:Rs: radial scale length of the drop-off in density amplitude of the arms (can be Quantity)
:H: scale height of the stellar arm perturbation (can be Quantity)
           :Cs: list of constants multiplying the :math:`\\cos(n \\gamma)` terms
:omega: rotational pattern speed of the spiral arms (can be Quantity)
OUTPUT:
(none)
HISTORY:
Started - 2017-05-12 Jack Hong (UBC)
Completed - 2017-07-04 Jack Hong (UBC)
"""
Potential.__init__(self, amp=amp, ro=ro, vo=vo, amp_units=amp_units)
if _APY_LOADED:
if isinstance(alpha, units.Quantity):
alpha = alpha.to(units.rad).value
if isinstance(r_ref, units.Quantity):
r_ref = r_ref.to(units.kpc).value / self._ro
if isinstance(phi_ref, units.Quantity):
phi_ref = phi_ref.to(units.rad).value
if isinstance(Rs, units.Quantity):
Rs = Rs.to(units.kpc).value / self._ro
if isinstance(H, units.Quantity):
H = H.to(units.kpc).value / self._ro
if isinstance(omega, units.Quantity):
omega = omega.to(units.km / units.s / units.kpc).value \
/ bovy_conversion.freq_in_kmskpc(self._vo, self._ro)
self._N = -N # trick to flip to left handed coordinate system; flips sign for phi and phi_ref, but also alpha.
self._alpha = -alpha # we don't want sign for alpha to change, so flip alpha. (see eqn. 3 in the paper)
self._sin_alpha = np.sin(-alpha)
self._tan_alpha = np.tan(-alpha)
self._r_ref = r_ref
self._phi_ref = phi_ref
self._Rs = Rs
self._H = H
self._Cs = np.array(Cs)
self._ns = np.arange(1, len(Cs) + 1)
self._omega = omega
self._rho0 = 1 / (4 * np.pi)
self._HNn = self._H * self._N * self._ns
self.isNonAxi = True # Potential is not axisymmetric
self.hasC = True # Potential has C implementation to speed up orbit integrations
self.hasC_dxdv = True # Potential has C implementation of second derivatives
@check_potential_inputs_not_arrays
def _evaluate(self, R, z, phi=0, t=0):
"""
NAME:
_evaluate
PURPOSE:
Evaluate the potential at the given coordinates. (without the amp factor; handled by super class)
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: Phi(R, z, phi, t)
HISTORY:
2017-05-12 Jack Hong (UBC)
"""
Ks = self._K(R)
Bs = self._B(R)
Ds = self._D(R)
return -self._H * np.exp(-(R-self._r_ref) / self._Rs) \
* np.sum(self._Cs / Ks / Ds * np.cos(self._ns * self._gamma(R, phi - self._omega * t)) / np.cosh(Ks * z / Bs) ** Bs)
@check_potential_inputs_not_arrays
def _Rforce(self, R, z, phi=0, t=0):
"""
NAME:
_Rforce
PURPOSE:
Evaluate the radial force for this potential at the given coordinates. (-dPhi/dR)
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: the radial force
HISTORY:
2017-05-12 Jack Hong (UBC)
"""
He = self._H * np.exp(-(R-self._r_ref)/self._Rs)
Ks = self._K(R)
Bs = self._B(R)
Ds = self._D(R)
dKs_dR = self._dK_dR(R)
dBs_dR = self._dB_dR(R)
dDs_dR = self._dD_dR(R)
g = self._gamma(R, phi - self._omega * t)
dg_dR = self._dgamma_dR(R)
cos_ng = np.cos(self._ns * g)
sin_ng = np.sin(self._ns * g)
zKB = z * Ks / Bs
sechzKB = 1 / np.cosh(zKB)
return -He * np.sum(self._Cs * sechzKB**Bs / Ds * ((self._ns * dg_dR / Ks * sin_ng
+ cos_ng * (z * np.tanh(zKB) * (dKs_dR/Ks - dBs_dR/Bs)
- dBs_dR / Ks * np.log(sechzKB)
+ dKs_dR / Ks**2
+ dDs_dR / Ds / Ks))
+ cos_ng / Ks / self._Rs))
@check_potential_inputs_not_arrays
def _zforce(self, R, z, phi=0, t=0):
"""
NAME:
_zforce
PURPOSE:
Evaluate the vertical force for this potential at the given coordinates. (-dPhi/dz)
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: the vertical force
HISTORY:
2017-05-25 Jack Hong (UBC)
"""
Ks = self._K(R)
Bs = self._B(R)
Ds = self._D(R)
zK_B = z * Ks / Bs
return -self._H * np.exp(-(R-self._r_ref) / self._Rs) \
* np.sum(self._Cs / Ds * np.cos(self._ns * self._gamma(R, phi - self._omega * t))
* np.tanh(zK_B) / np.cosh(zK_B)**Bs)
@check_potential_inputs_not_arrays
def _phiforce(self, R, z, phi=0, t=0):
"""
NAME:
_phiforce
PURPOSE:
Evaluate the azimuthal force in cylindrical coordinates. (-dPhi/dphi)
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: the azimuthal force
HISTORY:
2017-05-25 Jack Hong (UBC)
"""
g = self._gamma(R, phi - self._omega * t)
Ks = self._K(R)
Bs = self._B(R)
Ds = self._D(R)
return -self._H * np.exp(-(R-self._r_ref) / self._Rs) \
* np.sum(self._N * self._ns * self._Cs / Ds / Ks / np.cosh(z * Ks / Bs)**Bs * np.sin(self._ns * g))
@check_potential_inputs_not_arrays
def _R2deriv(self, R, z, phi=0, t=0):
"""
NAME:
_R2deriv
PURPOSE:
Evaluate the second (cylindrical) radial derivative of the potential.
(d^2 potential / d R^2)
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: the second radial derivative
HISTORY:
2017-05-31 Jack Hong (UBC)
"""
Rs = self._Rs
He = self._H * np.exp(-(R-self._r_ref)/self._Rs)
Ks = self._K(R)
Bs = self._B(R)
Ds = self._D(R)
dKs_dR = self._dK_dR(R)
dBs_dR = self._dB_dR(R)
dDs_dR = self._dD_dR(R)
R_sina = R * self._sin_alpha
HNn_R_sina = self._HNn / R_sina
HNn_R_sina_2 = HNn_R_sina**2
x = R * (0.3 * HNn_R_sina + 1) * self._sin_alpha
d2Ks_dR2 = 2 * self._N * self._ns / R**3 / self._sin_alpha
d2Bs_dR2 = HNn_R_sina / R**2 * (2.4 * HNn_R_sina + 2)
d2Ds_dR2 = self._sin_alpha / R / x * (self._HNn* (0.18 * self._HNn * (HNn_R_sina + 0.3 * HNn_R_sina_2 + 1) / x**2
+ 2 / R_sina
- 0.6 * HNn_R_sina * (1 + 0.6 * HNn_R_sina) / x
- 0.6 * (HNn_R_sina + 0.3 * HNn_R_sina_2 + 1) / x
+ 1.8 * self._HNn / R_sina**2))
g = self._gamma(R, phi - self._omega * t)
dg_dR = self._dgamma_dR(R)
d2g_dR2 = self._N / R**2 / self._tan_alpha
sin_ng = np.sin(self._ns * g)
cos_ng = np.cos(self._ns * g)
zKB = z * Ks / Bs
sechzKB = 1 / np.cosh(zKB)
sechzKB_Bs = sechzKB**Bs
log_sechzKB = np.log(sechzKB)
tanhzKB = np.tanh(zKB)
ztanhzKB = z * tanhzKB
return -He / Rs * (np.sum(self._Cs * sechzKB_Bs / Ds
* ((self._ns * dg_dR / Ks * sin_ng
+ cos_ng * (ztanhzKB * (dKs_dR/Ks - dBs_dR/Bs)
- dBs_dR / Ks * log_sechzKB
+ dKs_dR / Ks**2
+ dDs_dR / Ds / Ks))
- (Rs * (1 / Ks * ((ztanhzKB * (dBs_dR / Bs * Ks - dKs_dR)
+ log_sechzKB * dBs_dR)
- dDs_dR / Ds) * (self._ns * dg_dR * sin_ng
+ cos_ng * (ztanhzKB * Ks * (dKs_dR/Ks - dBs_dR/Bs)
- dBs_dR * log_sechzKB
+ dKs_dR / Ks
+ dDs_dR / Ds))
+ (self._ns * (sin_ng * (d2g_dR2 / Ks - dg_dR / Ks**2 * dKs_dR)
+ dg_dR**2 / Ks * cos_ng * self._ns)
+ z * (-sin_ng * self._ns * dg_dR * tanhzKB * (dKs_dR/Ks - dBs_dR/Bs)
+ cos_ng * (z * (dKs_dR/Bs - dBs_dR/Bs**2 * Ks) * (1-tanhzKB**2) * (dKs_dR/Ks - dBs_dR/Bs)
+ tanhzKB * (d2Ks_dR2/Ks-(dKs_dR/Ks)**2 - d2Bs_dR2/Bs + (dBs_dR/Bs)**2)))
+ (cos_ng * (dBs_dR/Ks * ztanhzKB * (dKs_dR/Bs - dBs_dR/Bs**2*Ks)
-(d2Bs_dR2/Ks-dBs_dR*dKs_dR/Ks**2) * log_sechzKB)
+ dBs_dR/Ks * log_sechzKB * sin_ng * self._ns * dg_dR)
+ ((cos_ng * (d2Ks_dR2 / Ks**2 - 2 * dKs_dR**2 / Ks**3)
- dKs_dR / Ks**2 * sin_ng * self._ns * dg_dR)
+ (cos_ng * (d2Ds_dR2 / Ds / Ks
- (dDs_dR/Ds)**2 / Ks
- dDs_dR / Ds / Ks**2 * dKs_dR)
- sin_ng * self._ns * dg_dR * dDs_dR / Ds / Ks))))
- 1 / Ks * (cos_ng / Rs
+ (cos_ng * ((dDs_dR * Ks + Ds * dKs_dR) / (Ds * Ks)
- (ztanhzKB * (dBs_dR / Bs * Ks - dKs_dR)
+ log_sechzKB * dBs_dR))
+ sin_ng * self._ns * dg_dR))))))
@check_potential_inputs_not_arrays
def _z2deriv(self, R, z, phi=0, t=0):
"""
NAME:
_z2deriv
PURPOSE:
Evaluate the second (cylindrical) vertical derivative of the potential.
(d^2 potential / d z^2)
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: the second vertical derivative
HISTORY:
2017-05-26 Jack Hong (UBC)
"""
g = self._gamma(R, phi - self._omega * t)
Ks = self._K(R)
Bs = self._B(R)
Ds = self._D(R)
zKB = z * Ks / Bs
tanh2_zKB = np.tanh(zKB)**2
return -self._H * np.exp(-(R-self._r_ref)/self._Rs) \
* np.sum(self._Cs * Ks / Ds * ((tanh2_zKB - 1) / Bs + tanh2_zKB) * np.cos(self._ns * g) / np.cosh(zKB)**Bs)
@check_potential_inputs_not_arrays
def _phi2deriv(self, R, z, phi=0, t=0):
"""
NAME:
_phi2deriv
PURPOSE:
Evaluate the second azimuthal derivative of the potential in cylindrical coordinates.
(d^2 potential / d phi^2)
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: d^2 potential / d phi^2
HISTORY:
2017-05-29 Jack Hong (UBC)
"""
g = self._gamma(R, phi - self._omega * t)
Ks = self._K(R)
Bs = self._B(R)
Ds = self._D(R)
return self._H * np.exp(-(R-self._r_ref) / self._Rs) \
* np.sum(self._Cs * self._N**2. * self._ns**2. / Ds / Ks / np.cosh(z*Ks/Bs)**Bs * np.cos(self._ns*g))
@check_potential_inputs_not_arrays
def _Rzderiv(self, R, z, phi=0., t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
Evaluate the mixed (cylindrical) radial and vertical derivative of the potential
(d^2 potential / dR dz).
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: d^2 potential / dR dz
HISTORY:
2017-05-12 Jack Hong (UBC)
"""
Rs = self._Rs
He = self._H * np.exp(-(R-self._r_ref)/self._Rs)
Ks = self._K(R)
Bs = self._B(R)
Ds = self._D(R)
dKs_dR = self._dK_dR(R)
dBs_dR = self._dB_dR(R)
dDs_dR = self._dD_dR(R)
g = self._gamma(R, phi - self._omega * t)
dg_dR = self._dgamma_dR(R)
cos_ng = np.cos(self._ns * g)
sin_ng = np.sin(self._ns * g)
zKB = z * Ks / Bs
sechzKB = 1 / np.cosh(zKB)
sechzKB_Bs = sechzKB**Bs
log_sechzKB = np.log(sechzKB)
tanhzKB = np.tanh(zKB)
return - He * np.sum(sechzKB_Bs * self._Cs / Ds * (Ks * tanhzKB * (self._ns * dg_dR / Ks * sin_ng
+ cos_ng * (z * tanhzKB * (dKs_dR/Ks - dBs_dR/Bs)
- dBs_dR / Ks * log_sechzKB
+ dKs_dR / Ks**2
+ dDs_dR / Ds / Ks))
- cos_ng * ((zKB * (dKs_dR/Ks - dBs_dR/Bs) * (1 - tanhzKB**2)
+ tanhzKB * (dKs_dR/Ks - dBs_dR/Bs)
+ dBs_dR / Bs * tanhzKB)
- tanhzKB / Rs)))
@check_potential_inputs_not_arrays
def _Rphideriv(self, R, z, phi=0,t=0):
"""
NAME:
_Rphideriv
PURPOSE:
Return the mixed radial and azimuthal derivative of the potential in cylindrical coordinates
(d^2 potential / dR dphi)
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: the mixed radial and azimuthal derivative
HISTORY:
2017-06-09 Jack Hong (UBC)
"""
He = self._H * np.exp(-(R - self._r_ref) / self._Rs)
Ks = self._K(R)
Bs = self._B(R)
Ds = self._D(R)
dKs_dR = self._dK_dR(R)
dBs_dR = self._dB_dR(R)
dDs_dR = self._dD_dR(R)
g = self._gamma(R, phi - self._omega * t)
dg_dR = self._dgamma_dR(R)
cos_ng = np.cos(self._ns * g)
sin_ng = np.sin(self._ns * g)
zKB = z * Ks / Bs
sechzKB = 1 / np.cosh(zKB)
sechzKB_Bs = sechzKB ** Bs
return - He * np.sum(self._Cs * sechzKB_Bs / Ds * self._ns * self._N
* (- self._ns * dg_dR / Ks * cos_ng
+ sin_ng * (z * np.tanh(zKB) * (dKs_dR / Ks - dBs_dR / Bs)
+ 1/Ks * (-dBs_dR * np.log(sechzKB)
+ dKs_dR / Ks
+ dDs_dR / Ds
+ 1 / self._Rs))))
@check_potential_inputs_not_arrays
def _dens(self, R, z, phi=0, t=0):
"""
NAME:
_dens
PURPOSE:
Evaluate the density. If not given, the density is computed using the Poisson equation
from the first and second derivatives of the potential (if all are implemented).
INPUT:
:param R: galactocentric cylindrical radius (must be scalar, not array)
:param z: vertical height (must be scalar, not array)
:param phi: azimuth (must be scalar, not array)
:param t: time (must be scalar, not array)
OUTPUT:
:return: the density
HISTORY:
2017-05-12 Jack Hong (UBC)
"""
g = self._gamma(R, phi - self._omega * t)
Ks = self._K(R)
Bs = self._B(R)
Ds = self._D(R)
ng = self._ns * g
zKB = z * Ks / Bs
sech_zKB = 1 / np.cosh(zKB)
tanh_zKB = np.tanh(zKB)
log_sech_zKB = np.log(sech_zKB)
        # numpy array of E as defined in the appendix of the paper.
E = 1 + Ks * self._H / Ds * (1 - 0.3 / (1 + 0.3 * Ks * self._H) ** 2) - R / self._Rs \
- (Ks * self._H) * (1 + 0.8 * Ks * self._H) * log_sech_zKB \
- 0.4 * (Ks * self._H) ** 2 * zKB * tanh_zKB
        # numpy array of rE' as defined in the appendix of the paper.
rE = -Ks * self._H / Ds * (1 - 0.3 * (1 - 0.3 * Ks * self._H) / (1 + 0.3 * Ks * self._H) ** 3) \
+ (Ks * self._H / Ds * (1 - 0.3 / (1 + 0.3 * Ks * self._H) ** 2)) - R / self._Rs \
+ Ks * self._H * (1 + 1.6 * Ks * self._H) * log_sech_zKB \
- (0.4 * (Ks * self._H) ** 2 * zKB * sech_zKB) ** 2 / Bs \
+ 1.2 * (Ks * self._H) ** 2 * zKB * tanh_zKB
return np.sum(self._Cs * self._rho0 * (self._H / (Ds * R)) * np.exp(-(R - self._r_ref) / self._Rs)
* sech_zKB**Bs * (np.cos(ng) * (Ks * R * (Bs + 1) / Bs * sech_zKB**2
- 1 / Ks / R * (E**2 + rE))
- 2 * np.sin(ng)* E * np.cos(self._alpha)))
def OmegaP(self):
"""
NAME:
OmegaP
PURPOSE:
Return the pattern speed. (used to compute the Jacobi integral for orbits).
INPUT:
:param self
OUTPUT:
:return: the pattern speed
HISTORY:
2017-06-09 Jack Hong (UBC)
"""
return self._omega
def _gamma(self, R, phi):
"""Return gamma. (eqn 3 in the paper)"""
return self._N * (phi - self._phi_ref - np.log(R / self._r_ref) / self._tan_alpha)
def _dgamma_dR(self, R):
"""Return the first derivative of gamma wrt R."""
return -self._N / R / self._tan_alpha
def _K(self, R):
"""Return numpy array from K1 up to and including Kn. (eqn. 5)"""
return self._ns * self._N / R / self._sin_alpha
def _dK_dR(self, R):
"""Return numpy array of dK/dR from K1 up to and including Kn."""
return -self._ns * self._N / R**2 / self._sin_alpha
def _B(self, R):
"""Return numpy array from B1 up to and including Bn. (eqn. 6)"""
HNn_R = self._HNn / R
return HNn_R / self._sin_alpha * (0.4 * HNn_R / self._sin_alpha + 1)
def _dB_dR(self, R):
"""Return numpy array of dB/dR from B1 up to and including Bn."""
return -self._HNn / R**3 / self._sin_alpha**2 * (0.8 * self._HNn + R * self._sin_alpha)
def _D(self, R):
"""Return numpy array from D1 up to and including Dn. (eqn. 7)"""
return (0.3 * self._HNn**2 / self._sin_alpha / R
+ self._HNn + R * self._sin_alpha) / (0.3 * self._HNn + R * self._sin_alpha)
def _dD_dR(self, R):
"""Return numpy array of dD/dR from D1 up to and including Dn."""
HNn_R_sina = self._HNn / R / self._sin_alpha
return HNn_R_sina * (0.3 * (HNn_R_sina + 0.3 * HNn_R_sina**2. + 1) / R / (0.3 * HNn_R_sina + 1)**2
- (1/R * (1 + 0.6 * HNn_R_sina) / (0.3 * HNn_R_sina + 1)))
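# Usage sketch (illustrative values, not from the galpy documentation): the
# spiral perturbation is typically combined with an axisymmetric potential.
#
#     from galpy.potential import LogarithmicHaloPotential, SpiralArmsPotential
#     sp = SpiralArmsPotential(N=2, alpha=0.2, r_ref=1., H=0.125)
#     pot = [LogarithmicHaloPotential(normalize=1.), sp]
#     sp(1., 0., phi=0.5)  # evaluate the perturbation at R=1, z=0, phi=0.5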
|
the-stack_0_21922 | """Core parametrizer classes for Stratified Spaces.
"""
from collections.abc import Iterable
import geomstats.backend as gs
from tests.conftest import TestCase
class PointSetTestCase(TestCase):
def test_random_point_belongs(self, space_args, n_points):
space = self.testing_data._PointSet(*space_args)
random_point = space.random_point(n_points)
result = gs.all(space.belongs(random_point))
self.assertAllClose(result, True)
def test_random_point_output_shape(self, space, n_samples):
points = space.random_point(n_samples)
self.assertTrue(len(points) == n_samples)
def test_belongs(self, space_args, points, expected):
space = self.testing_data._PointSet(*space_args)
self.assertAllClose(space.belongs(points), expected)
def test_set_to_array(self, space_args, points, expected):
space = self.testing_data._PointSet(*space_args)
self.assertAllClose(space.set_to_array(points), expected)
def test_set_to_array_output_shape(self, space, points):
n = len(points) if type(points) is list else 1
self.assertTrue(space.set_to_array(points).shape[0] == n)
class PointTestCase(TestCase):
def test_to_array(self, point_args, expected):
pt = self.testing_data._Point(*point_args)
self.assertAllClose(pt.to_array(), expected)
class PointSetMetricTestCase(TestCase):
@staticmethod
def _convert_to_gs_array(results, is_list):
if is_list:
resh_res = [[pt.to_array() for pt in pts_geo] for pts_geo in results]
else:
resh_res = [pt.to_array() for pt in results]
return gs.array(resh_res)
def test_dist(self, space_args, point_a, point_b, expected):
space = self.testing_data._PointSet(*space_args)
geom = self.testing_data._PointSetMetric(space)
results = geom.dist(point_a, point_b)
self.assertAllClose(results, expected)
def test_dist_output_shape(self, dist_fnc, point_a, point_b):
results = dist_fnc(point_a, point_b)
is_array = type(point_a) is list or type(point_b) is list
if is_array:
n_dist = max(
len(point_a) if type(point_a) is list else 1,
len(point_b) if type(point_b) is list else 1,
)
self.assertTrue(results.size == n_dist)
else:
self.assertTrue(not isinstance(results, Iterable))
def test_dist_properties(self, dist_fnc, point_a, point_b, point_c):
dist_ab = dist_fnc(point_a, point_b)
dist_ba = dist_fnc(point_b, point_a)
self.assertAllClose(dist_ab, dist_ba)
self.assertAllClose(dist_fnc(point_a, point_a), gs.zeros(1))
dist_ac = dist_fnc(point_a, point_c)
dist_cb = dist_fnc(point_c, point_b)
rhs = dist_ac + dist_cb
assert dist_ab <= (gs.atol + gs.rtol * rhs) + rhs
def test_geodesic(self, space_args, start_point, end_point, t, expected):
space = self.testing_data._PointSet(*space_args)
geom = self.testing_data._PointSetMetric(space)
geodesic = geom.geodesic(start_point, end_point)
pts_result = geodesic(t)
is_list = type(start_point) is list or type(end_point) is list
results = self._convert_to_gs_array(pts_result, is_list)
self.assertAllClose(results, expected)
def test_geodesic_output_shape(self, metric, start_point, end_point, t):
geodesic = metric.geodesic(start_point, end_point)
is_list = type(start_point) is list or type(end_point) is list
n_geo = max(
len(start_point) if type(start_point) is list else 1,
len(end_point) if type(end_point) is list else 1,
)
pt = start_point[0] if type(start_point) is list else start_point
d_array = gs.ndim(pt.to_array())
n_t = len(t) if type(t) is list else 1
results = self._convert_to_gs_array(geodesic(t), is_list)
self.assertTrue(results.ndim == d_array + 1 + int(is_list))
self.assertTrue(results.shape[-d_array - 1] == n_t)
if is_list:
self.assertTrue(results.shape[-d_array - 2] == n_geo)
def test_geodesic_bounds(self, metric, pt_start, pt_end):
geodesic = metric.geodesic(pt_start, pt_end)
results = geodesic([0.0, 1.0])
for pt, pt_res in zip([pt_start, pt_end], results):
self.assertAllClose(pt_res.to_array(), pt.to_array())
|
the-stack_0_21925 | """
Utilities for displaying salient objects maps. The method used in these functions is explained in:
Explaining How a Deep Neural Network Trained with End-to-End Learning Steers a Car
https://arxiv.org/abs/1704.07911
"""
__author__ = "András Kalapos"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2020 András Kalapos"
import numpy as np
import cv2
import tensorflow as tf
def find_layer_by_name(model, name):
layer_idx = -1
for i, layer in enumerate(model.layers):
if layer.name == name:
layer_idx = i
return layer_idx
def nvidia_salient_map(model: tf.keras.Model, obs, output_vector_idx=None):
"""
Explaining How a Deep Neural Network Trained with End-to-End Learning Steers a Car
https://arxiv.org/abs/1704.07911
"""
layer_outputs = [model.layers[find_layer_by_name(model, 'conv_out')].output,
model.layers[find_layer_by_name(model, 'conv3')].output,
model.layers[find_layer_by_name(model, 'conv2')].output,
model.layers[find_layer_by_name(model, 'conv1')].output]
model_partial = tf.keras.Model(inputs=model.inputs, outputs=layer_outputs)
activations = model_partial.predict(obs[None, ...])
if output_vector_idx is None:
salient_map = np.average(activations[0][0, :, :, :], axis=2)
else:
salient_map = activations[0][0, :, :, output_vector_idx]
for idx in range(1, len(activations)):
# averaging of feature maps and element wise multiplication with previous layer's salient map
salient_map = np.multiply(salient_map, np.average(activations[idx][0, :, :, :], axis=2))
if idx < len(activations) - 1:
salient_map = cv2.resize(salient_map, activations[idx + 1][0, :, :, 0].shape[::-1])
salient_map = cv2.resize(salient_map, obs.shape[:2])
# Saliency values are sometimes negative (if the output was negative)
salient_map = np.abs(salient_map)
# Scale to the 0.0-1.0 range
if np.max(salient_map) != np.min(salient_map):
salient_map = (salient_map - np.min(salient_map))/(np.max(salient_map) - np.min(salient_map))
else:
salient_map = np.zeros_like(salient_map)
action_out = activations[0]
return salient_map, action_out
def display_salient_map(salient_map, obs, window_title="Saliency", frames_in_stack_to_be_displayed=(0, 1, 2)):
obs_bgr = obs[..., [2, 1, 0, 5, 4, 3, 8, 7, 6]]
saliency_heatmap = cv2.applyColorMap((salient_map * 255).astype(np.uint8), cv2.COLORMAP_JET) / 255.
# saliency_heatmap = np.repeat(saliency_map[...,None], 3, axis=2)
# saliency_heatmap = saliency_map[...,None] * [[[0., 0., 1.]]]
to_merge_rows = []
for i in frames_in_stack_to_be_displayed:
obs_i = obs_bgr[..., 3 * i:3 * (i + 1)]
saliency_heatmap_overlayed = cv2.addWeighted(obs_i, 0.5, saliency_heatmap, 0.5, 0)
to_merge_rows.append(np.concatenate([saliency_heatmap, obs_i, saliency_heatmap_overlayed], axis=1))
merged = np.concatenate(to_merge_rows, axis=0)
# saliency_heatmap_overlayed = cv2.addWeighted(obs_bgr[..., :3], 0.5, saliency_heatmap, 0.5, 0)
# merged = np.concatenate([saliency_heatmap, obs_bgr[...,:3], saliency_heatmap_overlayed], axis=1)
cv2.imshow(window_title, merged)
cv2.waitKey(1)
def display_salient_map2(salient_map, obs, window_title="Saliency", frames_in_stack_to_be_displayed=(0, 1, 2),
use_color_map=True, overlay_only=True):
"""
    :param salient_map: Salient object map, normed between 0 and 1
:param obs: Observations, or stack of 3 observations. Channel order must be RGB(RGBRGB).
Float representation is expected
:param window_title:
:param frames_in_stack_to_be_displayed: If frame stacking is used, select which RGB frames should be displayed
:param use_color_map: Display salient obj map as a colored heatmap, or highlight salient objects with a single blue
color (opacity of the overlay is varied based on salient map values)
:param overlay_only: Display overlay only or also display the heatmap and the observation separately
"""
if obs.shape[2] == 9:
obs_bgr = obs[..., [2, 1, 0, 5, 4, 3, 8, 7, 6]]
else:
obs_bgr = obs[..., [2, 1, 0]]
frames_in_stack_to_be_displayed = [0]
salient_map = cv2.resize(salient_map, obs_bgr.shape[1::-1])
if use_color_map:
saliency_heatmap = cv2.applyColorMap(((1-salient_map) * 255).astype(np.uint8), cv2.COLORMAP_JET) / 255.
else:
saliency_heatmap = np.ones((salient_map.shape[0], salient_map.shape[1], 3))
saliency_heatmap = saliency_heatmap * np.array([[[1., 0, 0]]]) # "Heatmap" color is blue
saliency_heatmap = saliency_heatmap.astype(np.float32)
to_merge_rows = []
for i in frames_in_stack_to_be_displayed:
obs_i = obs_bgr[..., 3 * i:3 * (i + 1)].astype(np.float32)
saliency_heatmap_overlayed = cv2.addWeighted(obs_i * (1 - np.power(salient_map[..., None], 2)), 1.0,
saliency_heatmap * np.power(salient_map[..., None], 2), 1.0, 0)
if overlay_only:
to_merge_rows.append(saliency_heatmap_overlayed)
else:
to_merge_rows.append(np.concatenate([saliency_heatmap, obs_i, saliency_heatmap_overlayed], axis=1))
merged = np.concatenate(to_merge_rows, axis=0)
# merged = cv2.resize(merged, tuple(np.array(merged.shape[:2]) * 4), interpolation=cv2.INTER_LANCZOS4)
cv2.imshow(window_title, merged)
cv2.waitKey(1)
return merged
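# --- Usage sketch (not part of the original module) --------------------------
# A minimal, hedged example of how these helpers fit together. It assumes a
# saved Keras model at "policy_model.h5" whose layers are named 'conv1',
# 'conv2', 'conv3' and 'conv_out' (as nvidia_salient_map requires), and a
# float observation in [0, 1] with 9 stacked RGB channels; the path and the
# observation shape are placeholders, not values from the original code.
if __name__ == "__main__":
    model = tf.keras.models.load_model("policy_model.h5")  # hypothetical path
    obs = np.random.uniform(0.0, 1.0, size=(84, 84, 9)).astype(np.float32)
    # Compute the salient-object map and overlay it on the observation stack.
    salient_map, action_out = nvidia_salient_map(model, obs)
    display_salient_map2(salient_map, obs, window_title="Saliency demo")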
|
the-stack_0_21926 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains overloads to convert Python to equivalent JAX code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from jax import lax
from pyctr.overloads import py_defaults
from pyctr.overloads import staging
init = py_defaults.init
assign = py_defaults.assign
read = py_defaults.read
call = py_defaults.call
def if_stmt(cond, body, orelse, local_writes):
"""Functional form of an if statement.
Args:
cond: Callable with no arguments, predicate of conditional.
body: Callable with no arguments, and outputs of the positive (if) branch as
return type.
orelse: Callable with no arguments, and outputs of the negative (else)
branch as return type.
local_writes: list(pyct.Variable), list of variables assigned in either body
or orelse.
Returns:
Tuple containing the statement outputs.
"""
cond_result = cond()
def if_body(*_):
modified_vals, _ = staging.execute_isolated(body, local_writes)
return modified_vals
def if_orelse(*_):
modified_vals, _ = staging.execute_isolated(orelse, local_writes)
return modified_vals
result_values = lax.cond(cond_result, (), if_body, (), if_orelse)
for var, retval in zip(local_writes, result_values):
var.val = retval
return result_values
def while_stmt(cond, body, _, local_writes):
"""Functional form of a while statement."""
local_writes = [
var for var in local_writes if not py_defaults.is_undefined(var.val)
]
def while_test(state):
for var, s in zip(local_writes, state):
var.val = s
_, result_values = staging.execute_isolated(cond, local_writes)
return result_values
def while_body(state):
for var, s in zip(local_writes, state):
var.val = s
modified_vals, _ = staging.execute_isolated(body, local_writes)
return modified_vals
result_values = lax.while_loop(while_test, while_body,
[var.val for var in local_writes])
for var, val in zip(local_writes, result_values):
var.val = val
return result_values
def for_stmt(target, iter_, body, orelse, modified_vars):
"""Functional form of a for statement."""
del orelse
modified_vars = [
var for var in modified_vars if not py_defaults.is_undefined(var.val)
]
def for_body(idx, state):
for var, s in zip(modified_vars, state):
var.val = s
target.val = iter_[idx]
modified_vals, _ = staging.execute_isolated(body, modified_vars)
return modified_vals
results = lax.fori_loop(0, len(iter_), for_body,
[var.val for var in modified_vars])
for var, val in zip(modified_vars, results):
var.val = val
|
the-stack_0_21927 | import pickle
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import simps
from weather.scraper.flight_conditions import properties, Airframe
# Define object
fuel = 56*6.01*0.4535
initial_mass = 1111
final_mass = initial_mass-fuel
C172_props = properties({'Cl_alpha': 5.143, 'Cl_0': 0.31,
'planform': 16.1651, 'density': 0.770488088,
'mass_min': final_mass, 'mass_max': initial_mass,
'incidence': 0.})
C172 = Airframe(airframe='C172', timestamp=1549036800,
filepath='../../../weather/data/flight_plan/v_aoa_pickles/icao24s_',
properties=C172_props)
C172.retrieve_data(load_data=True)
C172.train_pdf(1000)
# Calculating total probability
xgrid = np.linspace(-5, 35, 1000)
ygrid = np.linspace(20, 75, 1000)
X, Y = np.meshgrid(xgrid, ygrid)
Z = np.exp(C172.pdf.score_samples(np.array([X.ravel(), Y.ravel()]).T))
Z = np.reshape(Z, X.shape)
total_list = []
for i in range(len(Z)):
# print('X', X[i])
# print('Y', Y[:, 0])
# print('Z', Z[i, :])
numerator = simps(Z[i, :], X[i])
total_list.append(numerator)
total = simps(total_list, Y[:, 0])
print('Probability total', total)
# Plot histograms
parameters = []
for i in range(200):
sample = C172.pdf.sample(1)
while sample[0][0]<0 or sample[0][0]>12:
sample = C172.pdf.sample(1)
parameters.append(sample)
C172.samples = np.array(parameters)
f = open('c172.p', 'wb')
pickle.dump(C172, f)
f.close()
# Plot PDF
C172.plot_pdf()
x, y = C172.samples.T
plt.scatter(x, y, c='k')
plt.show()
|
the-stack_0_21928 | import sys
from typing import List
import click
from globus_sdk import GlobusError, TransferClient
from timer_cli.auth import get_authorizers_for_scopes
TRANSFER_ALL_SCOPE = "urn:globus:auth:scope:transfer.api.globus.org:all"
def endpoints_not_activated(
transfer_client: TransferClient, endpoints: List[str]
) -> List[str]:
"""
Filter list of endpoint UUIDs, returning unactivated ones.
Exit 1 if transfer responds with an error trying to look up endpoints.
"""
result = []
for endpoint in endpoints:
try:
if not transfer_client.get_endpoint(endpoint).get("activated"):
result.append(endpoint)
except GlobusError as e:
click.echo(
f"couldn't get information for endpoint {endpoint}:"
f" {e.code}, {e.message}",
err=True,
)
sys.exit(1)
return result
def error_if_not_activated(
transfer_client: TransferClient,
endpoints: List[str],
reactivate_if_expires_in=86400,
):
not_activated = endpoints_not_activated(transfer_client, endpoints)
still_not_activated = []
for endpoint in not_activated:
response = transfer_client.endpoint_autoactivate(
endpoint, if_expires_in=reactivate_if_expires_in
)
if response.get("code") == "AutoActivationFailed":
still_not_activated.append(endpoint)
if still_not_activated:
click.echo(
f"Error: requested endpoint is not activated: {', '.join(not_activated)}\n"
"Open in the web app to activate:",
err=True,
)
        for endpoint in still_not_activated:
click.echo(
f" https://app.globus.org/file-manager?origin_id={endpoint}",
err=True,
)
sys.exit(1)
def get_transfer_client():
authorizers = get_authorizers_for_scopes([TRANSFER_ALL_SCOPE])
return TransferClient(authorizers.get(TRANSFER_ALL_SCOPE))
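# Usage sketch (not part of the original module): it assumes the user has
# already authenticated so get_authorizers_for_scopes can return a transfer
# authorizer; "<endpoint-uuid>" is a placeholder for a real Globus endpoint id.
if __name__ == "__main__":
    transfer_client = get_transfer_client()
    error_if_not_activated(transfer_client, ["<endpoint-uuid>"])
    print("endpoint is activated (or was auto-activated)")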
|
the-stack_0_21929 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import itertools
import string
import unittest
import pytest
import numpy as np
import pyarrow as pa
from pyarrow.csv import read_csv, ReadOptions, ParseOptions
def generate_col_names():
# 'a', 'b'... 'z', then 'aa', 'ab'...
letters = string.ascii_lowercase
for letter in letters:
yield letter
    for first in letters:
        for second in letters:
yield first + second
def make_random_csv(num_cols=2, num_rows=10, linesep=u'\r\n'):
arr = np.random.RandomState(42).randint(0, 1000, size=(num_cols, num_rows))
col_names = list(itertools.islice(generate_col_names(), num_cols))
csv = io.StringIO()
csv.write(u",".join(col_names))
csv.write(linesep)
for row in arr.T:
csv.write(u",".join(map(str, row)))
csv.write(linesep)
csv = csv.getvalue().encode()
columns = [pa.array(a, type=pa.int64()) for a in arr]
expected = pa.Table.from_arrays(columns, col_names)
return csv, expected
def test_read_options():
cls = ReadOptions
opts = cls()
assert opts.block_size > 0
opts.block_size = 12345
assert opts.block_size == 12345
assert opts.use_threads is True
opts.use_threads = False
assert opts.use_threads is False
opts = cls(block_size=1234, use_threads=False)
assert opts.block_size == 1234
assert opts.use_threads is False
def test_parse_options():
cls = ParseOptions
opts = cls()
assert opts.delimiter == ','
assert opts.quote_char == '"'
assert opts.double_quote is True
assert opts.escape_char is False
assert opts.header_rows == 1
assert opts.newlines_in_values is False
opts.delimiter = 'x'
assert opts.delimiter == 'x'
assert opts.quote_char == '"'
opts.escape_char = 'z'
assert opts.escape_char == 'z'
assert opts.quote_char == '"'
opts.quote_char = False
assert opts.quote_char is False
assert opts.escape_char == 'z'
opts.escape_char = False
assert opts.escape_char is False
assert opts.quote_char is False
opts.newlines_in_values = True
assert opts.newlines_in_values is True
opts.header_rows = 2
assert opts.header_rows == 2
opts = cls(delimiter=';', quote_char='%', double_quote=False,
escape_char='\\', header_rows=2, newlines_in_values=True)
assert opts.delimiter == ';'
assert opts.quote_char == '%'
assert opts.double_quote is False
assert opts.escape_char == '\\'
assert opts.header_rows == 2
assert opts.newlines_in_values is True
class BaseTestCSVRead:
def read_bytes(self, b, **kwargs):
return self.read_csv(pa.py_buffer(b), **kwargs)
def check_names(self, table, names):
assert table.num_columns == len(names)
assert [c.name for c in table.columns] == names
def test_header(self):
rows = b"abc,def,gh\n"
table = self.read_bytes(rows)
assert isinstance(table, pa.Table)
self.check_names(table, ["abc", "def", "gh"])
assert table.num_rows == 0
def test_simple_ints(self):
# Infer integer columns
rows = b"a,b,c\n1,2,3\n4,5,6\n"
table = self.read_bytes(rows)
schema = pa.schema([('a', pa.int64()),
('b', pa.int64()),
('c', pa.int64())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [1, 4],
'b': [2, 5],
'c': [3, 6],
}
def test_simple_varied(self):
# Infer various kinds of data
rows = b"a,b,c\n1,2,3\n4.0,-5,foo\n"
table = self.read_bytes(rows)
schema = pa.schema([('a', pa.float64()),
('b', pa.int64()),
('c', pa.binary())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [1.0, 4.0],
'b': [2, -5],
'c': [b"3", b"foo"],
}
def test_simple_nulls(self):
# Infer various kinds of data, with nulls
rows = (b"a,b,c,d\n"
b"1,2,,\n"
b"nan,-5,foo,\n"
b"4.5,#N/A,nan,\n")
table = self.read_bytes(rows)
schema = pa.schema([('a', pa.float64()),
('b', pa.int64()),
('c', pa.binary()),
('d', pa.null())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [1.0, None, 4.5],
'b': [2, -5, None],
'c': [b"", b"foo", b"nan"],
'd': [None, None, None]
}
def test_no_ending_newline(self):
# No \n after last line
rows = b"a,b,c\n1,2,3\n4,5,6"
table = self.read_bytes(rows)
assert table.to_pydict() == {
'a': [1, 4],
'b': [2, 5],
'c': [3, 6],
}
def test_trivial(self):
# A bit pointless, but at least it shouldn't crash
rows = b"\n\n"
table = self.read_bytes(rows)
assert table.to_pydict() == {'': [None]}
def test_invalid_csv(self):
# Various CSV errors
rows = b"a,b,c\n1,2\n4,5,6\n"
with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 2"):
self.read_bytes(rows)
rows = b"a,b,c\n1,2,3\n4"
with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 1"):
self.read_bytes(rows)
rows = b""
with pytest.raises(pa.ArrowInvalid, match="Empty CSV file"):
self.read_bytes(rows)
def test_options_delimiter(self):
rows = b"a;b,c\nde,fg;eh\n"
table = self.read_bytes(rows)
assert table.to_pydict() == {
'a;b': [b'de'],
'c': [b'fg;eh'],
}
opts = ParseOptions(delimiter=';')
table = self.read_bytes(rows, parse_options=opts)
assert table.to_pydict() == {
'a': [b'de,fg'],
'b,c': [b'eh'],
}
def test_small_random_csv(self):
csv, expected = make_random_csv(num_cols=2, num_rows=10)
table = self.read_bytes(csv)
assert table.schema == expected.schema
assert table.equals(expected)
assert table.to_pydict() == expected.to_pydict()
def test_stress_block_sizes(self):
# Test a number of small block sizes to stress block stitching
csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
block_sizes = [11, 12, 13, 17, 37, 111]
csvs = [csv_base, csv_base.rstrip(b'\r\n')]
for csv in csvs:
for block_size in block_sizes:
read_options = ReadOptions(block_size=block_size)
table = self.read_bytes(csv, read_options=read_options)
assert table.schema == expected.schema
if not table.equals(expected):
# Better error output
assert table.to_pydict() == expected.to_pydict()
class TestSerialCSVRead(BaseTestCSVRead, unittest.TestCase):
def read_csv(self, *args, **kwargs):
read_options = kwargs.setdefault('read_options', ReadOptions())
read_options.use_threads = False
table = read_csv(*args, **kwargs)
table._validate()
return table
class TestParallelCSVRead(BaseTestCSVRead, unittest.TestCase):
def read_csv(self, *args, **kwargs):
read_options = kwargs.setdefault('read_options', ReadOptions())
read_options.use_threads = True
table = read_csv(*args, **kwargs)
table._validate()
return table
|
the-stack_0_21930 | import os
import logging
import pyxdf
logging.basicConfig(level=logging.DEBUG) # Use logging.INFO to reduce output.
fname = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'xdf_sample.xdf'))
streams, fileheader = pyxdf.load_xdf(fname)
print("Found {} streams:".format(len(streams)))
for ix, stream in enumerate(streams):
print("Stream {}: {} - type {} - uid {} - shape {} at {} Hz (effective {} Hz)".format(
ix + 1, stream['info']['name'][0],
stream['info']['type'][0],
stream['info']['uid'][0],
(int(stream['info']['channel_count'][0]), len(stream['time_stamps'])),
stream['info']['nominal_srate'][0],
stream['info']['effective_srate'])
)
if any(stream['time_stamps']):
print("\tDuration: {} s".format(stream['time_stamps'][-1] - stream['time_stamps'][0]))
print("Done.")
|
the-stack_0_21931 | from conan.tools.microsoft import msvc_runtime_flag
from conan.tools.files import rename
from conans import ConanFile, tools, AutoToolsBuildEnvironment
from conans.errors import ConanInvalidConfiguration
import contextlib
import functools
import os
required_conan_version = ">=1.36.0"
class NsprConan(ConanFile):
name = "nspr"
homepage = "https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSPR"
description = "Netscape Portable Runtime (NSPR) provides a platform-neutral API for system level and libc-like functions."
topics = ("nspr", "libc")
url = "https://github.com/conan-io/conan-center-index"
license = "MPL-2.0"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_mozilla": [True, False],
"win32_target": ["winnt", "win95"],
}
default_options = {
"shared": False,
"fPIC": True,
"with_mozilla": True,
"win32_target": "winnt",
}
@property
def _is_msvc(self):
return str(self.settings.compiler) in ["Visual Studio", "msvc"]
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
else:
del self.options.win32_target
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.cppstd
del self.settings.compiler.libcxx
def validate(self):
# https://bugzilla.mozilla.org/show_bug.cgi?id=1658671
if tools.Version(self.version) < "4.29":
if self.settings.os == "Macos" and self.settings.arch == "armv8":
raise ConanInvalidConfiguration("NSPR does not support mac M1 before 4.29")
def build_requirements(self):
if self._settings_build.os == "Windows":
self.build_requires("mozilla-build/3.3")
if not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination="tmp", strip_root=True)
rename(self, os.path.join("tmp", "nspr"), self._source_subfolder)
tools.rmdir("tmp")
@contextlib.contextmanager
def _build_context(self):
if self._is_msvc:
with tools.vcvars(self):
with tools.environment_append({"CC": "cl", "CXX": "cl", "LD": "link"}):
yield
else:
yield
@functools.lru_cache(1)
def _configure_autotools(self):
autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
yes_no = lambda v: "yes" if v else "no"
conf_args = [
"--with-mozilla={}".format(yes_no(self.options.with_mozilla)),
"--enable-64bit={}".format(yes_no(self.settings.arch in ("armv8", "x86_64", "mips64", "ppc64", "ppc64le"))),
"--enable-strip={}".format(yes_no(self.settings.build_type not in ("Debug", "RelWithDebInfo"))),
"--enable-debug={}".format(yes_no(self.settings.build_type == "Debug")),
"--datarootdir={}".format(tools.unix_path(os.path.join(self.package_folder, "res"))),
"--disable-cplus",
]
if self._is_msvc:
conf_args.extend([
"{}-pc-mingw32".format("x86_64" if self.settings.arch == "x86_64" else "x86"),
"--enable-static-rtl={}".format(yes_no("MT" in msvc_runtime_flag(self))),
"--enable-debug-rtl={}".format(yes_no("d" in msvc_runtime_flag(self))),
])
elif self.settings.os == "Android":
conf_args.extend([
"--with-android-ndk={}".format(tools.get_env(["NDK_ROOT"])),
"--with-android-version={}".format(self.settings.os.api_level),
"--with-android-platform={}".format(tools.get_env("ANDROID_PLATFORM")),
"--with-android-toolchain={}".format(tools.get_env("ANDROID_TOOLCHAIN")),
])
elif self.settings.os == "Windows":
conf_args.append("--enable-win32-target={}".format(self.options.win32_target))
env = autotools.vars
if self.settings.os == "Macos":
if self.settings.arch == "armv8":
# conan adds `-arch`, which conflicts with nspr's apple silicon support
env["CFLAGS"] = env["CFLAGS"].replace("-arch arm64", "")
env["CXXFLAGS"] = env["CXXFLAGS"].replace("-arch arm64", "")
autotools.configure(args=conf_args, vars=env)
return autotools
def build(self):
with tools.chdir(self._source_subfolder):
# relocatable shared libs on macOS
tools.replace_in_file(
"configure",
"-install_name @executable_path/",
"-install_name @rpath/"
)
with self._build_context():
autotools = self._configure_autotools()
autotools.make()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
with tools.chdir(self._source_subfolder):
with self._build_context():
autotools = self._configure_autotools()
autotools.install()
tools.rmdir(os.path.join(self.package_folder, "bin"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "share"))
if self.settings.os == "Windows":
if self.options.shared:
os.mkdir(os.path.join(self.package_folder, "bin"))
for lib in self._library_names:
libsuffix = "lib" if self._is_msvc else "a"
libprefix = "" if self._is_msvc else "lib"
if self.options.shared:
os.unlink(os.path.join(self.package_folder, "lib", "{}{}_s.{}".format(libprefix, lib, libsuffix)))
rename(self, os.path.join(self.package_folder, "lib", "{}.dll".format(lib)),
os.path.join(self.package_folder, "bin", "{}.dll".format(lib)))
else:
os.unlink(os.path.join(self.package_folder, "lib", "{}{}.{}".format(libprefix, lib, libsuffix)))
os.unlink(os.path.join(self.package_folder, "lib", "{}.dll".format(lib)))
if not self.options.shared:
tools.replace_in_file(os.path.join(self.package_folder, "include", "nspr", "prtypes.h"),
"#define NSPR_API(__type) PR_IMPORT(__type)",
"#define NSPR_API(__type) extern __type")
tools.replace_in_file(os.path.join(self.package_folder, "include", "nspr", "prtypes.h"),
"#define NSPR_DATA_API(__type) PR_IMPORT_DATA(__type)",
"#define NSPR_DATA_API(__type) extern __type")
else:
shared_ext = "dylib" if self.settings.os == "Macos" else "so"
for lib in self._library_names:
if self.options.shared:
os.unlink(os.path.join(self.package_folder, "lib", "lib{}.a".format(lib)))
else:
os.unlink(os.path.join(self.package_folder, "lib", "lib{}.{}".format(lib, shared_ext)))
if self._is_msvc:
if self.settings.build_type == "Debug":
for lib in self._library_names:
os.unlink(os.path.join(self.package_folder, "lib", "{}.pdb".format(lib)))
if not self.options.shared or self.settings.os == "Windows":
for f in os.listdir(os.path.join(self.package_folder, "lib")):
os.chmod(os.path.join(self.package_folder, "lib", f), 0o644)
@property
def _library_names(self):
return ["plds4", "plc4", "nspr4"]
def package_info(self):
self.cpp_info.set_property("pkg_config_name", "nspr")
libs = self._library_names
if self.settings.os == "Windows" and not self.options.shared:
libs = list("{}_s".format(l) for l in libs)
self.cpp_info.libs = libs
if self.settings.compiler == "gcc" and self.settings.os == "Windows":
if self.settings.arch == "x86":
self.cpp_info.defines.append("_M_IX86")
elif self.settings.arch == "x86_64":
self.cpp_info.defines.append("_M_X64")
self.cpp_info.includedirs.append(os.path.join("include", "nspr"))
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.extend(["dl", "pthread"])
elif self.settings.os == "Windows":
self.cpp_info.system_libs.extend(["winmm", "ws2_32"])
aclocal = tools.unix_path(os.path.join(self.package_folder, "res", "aclocal"))
self.output.info("Appending AUTOMAKE_CONAN_INCLUDES environment variable: {}".format(aclocal))
self.env_info.AUTOMAKE_CONAN_INCLUDES.append(aclocal)
self.cpp_info.resdirs = ["res"]
|
the-stack_0_21932 | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Exploratory Data Analysis
#
# ## FARS Data comparing Fatalities in Denver and Seattle
# %%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import glob
from matplotlib import cm
# %%
colors = ["darkorange", "darkred"]
sns.set(rc={"figure.figsize": (4, 6)})
sns.set_palette(sns.color_palette(colors))
# %%
def concat_csv(glob_path):
files = glob.glob(glob_path)
dfs = [pd.read_csv(file, engine="python", header=0) for file in files]
return (
pd.concat(dfs, ignore_index=True)
.sort_values(by=["Year"])
.set_index(["Year", "City_Name"])
)
# %% [markdown]
# ## Total Fatalities
# %%
path = "../data/better_streets/processed/FARS/den_sea_ped_bike_fatalities.csv/*.csv"
# %%
yr_df = concat_csv(path)
# %%
yr_df.head()
# %%
uyr_df = pd.pivot_table(
yr_df, index="Year", values="Ped_Bike_Fatalities", columns=["City_Name"]
)
uyr_df.head()
# %%
bar_plot = uyr_df.plot.bar(
figsize=(12, 8),
title="Pedestrian and Cyclist Fatalities",
color=["darkred", "darkorange"],
)
bar_plot.plot()
plt.savefig("./plots/den_sea_ped_bar.png")
# %%
trend_plot = sns.lmplot(
x="Year",
y="Ped_Bike_Fatalities",
hue="City_Name",
data=yr_df.reset_index(),
legend=False,
)
plt.subplots_adjust(top=0.9)
plt.legend(loc="upper left")
plt.title("Pedestrian and Cyclist Fatalities")
plt.show()
trend_plot.savefig("./plots/den_sea_ped_trend.png", dpi=150)
# %% [markdown]
# ## Looking at Pedestrians and Cyclist Fatalities Separately
# %%
ped_df = concat_csv(
"../data/better_streets/processed/FARS/den_sea_ped_fatalities.csv/*.csv"
)
ped_df.shape, ped_df.columns
# %%
bike_df = concat_csv(
"../data/better_streets/processed/FARS/den_sea_bike_fatalities.csv/*.csv"
)
bike_df.shape, bike_df.columns
# %%
pb_df = ped_df.join(other=bike_df).fillna(value=0).reset_index()
pb_df.columns
# %%
pb_df["Total"] = pb_df["Ped_Fatalities"] + pb_df["Bike_Fatalities"]
pb_df = pb_df.rename(
columns={"Ped_Fatalities": "Pedestrians", "Bike_Fatalities": "Cyclists"}
)
pb_df.columns
|
the-stack_0_21934 | from .utils import DslBase
def SF(name_or_sf, **params):
# {"script_score": {"script": "_score"}, "filter": {}}
if isinstance(name_or_sf, dict):
if params:
            raise ValueError('SF() cannot accept parameters when passing in a dict.')
kwargs = {}
sf = name_or_sf.copy()
for k in ScoreFunction._param_defs:
if k in name_or_sf:
kwargs[k] = sf.pop(k)
if len(sf) != 1:
            raise ValueError('SF() expects a dict with a single score function, got %r' % (name_or_sf, ))
name, params = sf.popitem()
# boost factor special case, see elasticsearch #6343
if not isinstance(params, dict):
params = {'value': params}
kwargs.update(params)
return ScoreFunction.get_dsl_class(name)(**kwargs)
# ScriptScore(script="_score", filter=F())
if isinstance(name_or_sf, ScoreFunction):
if params:
            raise ValueError('SF() cannot accept parameters when passing in a ScoreFunction object.')
return name_or_sf
# "script_score", script="_score", filter=F()
return ScoreFunction.get_dsl_class(name_or_sf)(**params)
class ScoreFunction(DslBase):
_type_name = 'score_function'
_type_shortcut = staticmethod(SF)
_param_defs = {
'query': {'type': 'query'},
'filter': {'type': 'filter'},
}
name = None
def to_dict(self):
d = super(ScoreFunction, self).to_dict()
# filter and query dicts should be at the same level as us
for k in self._param_defs:
if k in d[self.name]:
d[k] = d[self.name].pop(k)
return d
class ScriptScore(ScoreFunction):
name = 'script_score'
class BoostFactor(ScoreFunction):
name = 'boost_factor'
def to_dict(self):
d = super(BoostFactor, self).to_dict()
if 'value' in d[self.name]:
d[self.name] = d[self.name].pop('value')
return d
class Random(ScoreFunction):
name = 'random'
class FieldValueFactor(ScoreFunction):
name = 'field_value_factor'
class Linear(ScoreFunction):
name = 'linear'
class Gauss(ScoreFunction):
name = 'gauss'
class Exp(ScoreFunction):
name = 'exp'
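# Usage sketch (not part of the original module), mirroring the calling
# conventions noted in the comments inside SF(): a name with keyword params,
# a raw dict, or an existing ScoreFunction instance all normalize to the same
# object. This is a hedged illustration, not additional library API.
#
#     sf1 = SF('script_score', script='_score')
#     sf2 = SF({'script_score': {'script': '_score'}})
#     sf3 = SF(ScriptScore(script='_score'))
#     assert sf1.to_dict() == sf2.to_dict() == sf3.to_dict()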
|
the-stack_0_21937 | import io
import re
import chardet
from plyara import Plyara
from rest_framework.views import exception_handler
from rest_framework.exceptions import APIException
def check_lexical_convention(entry):
return Plyara.is_valid_rule_name(entry)
def generate_kwargs_from_parsed_rule(parsed_rule):
# Generate parsed rule kwargs for saving a rule
name = parsed_rule['rule_name']
tags = parsed_rule.get('tags', [])
scopes = parsed_rule.get('scopes', [])
# TODO : Update when Plyara moves to clean Python types
metadata = parsed_rule.get('metadata', {})
for key, value in metadata.items():
if value not in ('true', 'false'):
try:
value = int(value)
except ValueError:
metadata[key] = '"' + value + '"'
strings = parsed_rule.get('strings', [])
condition = parsed_rule['condition_terms']
# TODO : Update when Plyara moves to stripping quotes from detect_imports module
imports = [imp.strip('"') for imp in Plyara.detect_imports(parsed_rule)]
comments = parsed_rule.get('comments', [])
dependencies = Plyara.detect_dependencies(parsed_rule)
# Calculate hash value of rule strings and condition
logic_hash = Plyara.generate_logic_hash(parsed_rule)
# TEMP FIX - Use only a single instance of a metakey
# until YaraGuardian models and functions can be updated
for key, value in metadata.items():
if isinstance(value, list):
metadata[key] = value[0]
return {'name': name,
'tags': list(set(tags)),
'scopes': list(set(scopes)),
'imports': list(set(imports)),
'comments': list(set(comments)),
'metadata': metadata,
'strings': strings,
'condition': condition,
'dependencies': dependencies,
'logic_hash': logic_hash}
def parse_rule_submission(raw_submission):
# Instantiate Parser
parser = Plyara()
# Container for results
submission_results = {'parsed_rules': [],
'parser_error': ''}
try:
# Check if submission needs to be read and decoded
if hasattr(raw_submission, 'read'):
raw_content = raw_submission.read()
# Attempt to automatically detect encoding
encoding = chardet.detect(raw_content)['encoding']
yara_content = raw_content.decode(encoding=encoding)
else:
yara_content = raw_submission
except Exception:
# Unable to decode or read the submitted content
yara_content = None
submission_results['parser_error'] = "Unable to read submission content"
# Ensure content is not blank before passing to parser
if yara_content:
try:
submission_results['parsed_rules'] = parser.parse_string(yara_content)
except Exception as error:
submission_results['parser_error'] = str(error)
return submission_results
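# Usage sketch (not part of the original module): parse_rule_submission accepts
# either a raw string or a file-like object, so a tiny inline rule is enough to
# exercise it. The rule text below is a made-up example.
#
#     results = parse_rule_submission('rule example_rule { condition: true }')
#     if not results['parser_error']:
#         kwargs = generate_kwargs_from_parsed_rule(results['parsed_rules'][0])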
def build_yarafile(queryset):
rules = queryset.order_by('dependencies')
# Temporary rule file container
temp_file = io.StringIO()
# Build import search patterns
import_options = Plyara.IMPORT_OPTIONS
import_pattern = 'import \"(?:{})\"\n'.format('|'.join(import_options))
for rule in rules.iterator():
# name, tags, imports, metadata, strings, condition, scopes
formatted_rule = rule.format_rule()
temp_file.write(formatted_rule)
temp_file.write('\n\n')
present_imports = set(re.findall(import_pattern, temp_file.getvalue()))
importless_file = re.sub(import_pattern, '', temp_file.getvalue())
# Finalized rule file container
rule_file = io.StringIO()
for import_value in present_imports:
rule_file.write(import_value)
rule_file.write('\n\n')
rule_file.write(importless_file)
return rule_file
def custom_exception_handler(exc, context):
response_content = {}
response = exception_handler(exc, context)
if response is not None:
response_content['status_code'] = response.status_code
if 'detail' not in response.data:
response_content['errors'] = response.data
else:
response_content['errors'] = [response.data['detail']]
response.data = response_content
return response
|
the-stack_0_21938 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import asyncio
import logging
from typing import Any, Dict, List, Optional
from fbpcp.entity.container_instance import ContainerInstance, ContainerInstanceStatus
from fbpcp.entity.mpc_instance import MPCInstance, MPCInstanceStatus, MPCParty
from fbpcp.repository.mpc_instance import MPCInstanceRepository
from fbpcp.service.container import ContainerService
from fbpcp.service.mpc_game import MPCGameService
from fbpcp.service.onedocker import OneDockerService
from fbpcp.util.typing import checked_cast
DEFAULT_BINARY_VERSION = "latest"
class MPCService:
"""MPCService is responsible for distributing a larger MPC game to multiple
MPC workers
"""
def __init__(
self,
container_svc: ContainerService,
instance_repository: MPCInstanceRepository,
task_definition: str,
mpc_game_svc: MPCGameService,
) -> None:
"""Constructor of MPCService
Keyword arguments:
container_svc -- service to spawn container instances
instance_repository -- repository to CRUD MPCInstance
task_definition -- containers task definition
mpc_game_svc -- service to generate package name and game arguments.
"""
if container_svc is None or instance_repository is None or mpc_game_svc is None:
raise ValueError(
f"Dependency is missing. container_svc={container_svc}, mpc_game_svc={mpc_game_svc}, "
f"instance_repository={instance_repository}"
)
self.container_svc = container_svc
self.instance_repository = instance_repository
self.task_definition = task_definition
self.mpc_game_svc: MPCGameService = mpc_game_svc
self.logger: logging.Logger = logging.getLogger(__name__)
self.onedocker_svc = OneDockerService(self.container_svc, self.task_definition)
"""
The game_args should be consistent with the game_config, which should be
defined in caller's game repository.
For example,
If the game config looks like this:
game_config = {
"game": {
"onedocker_package_name": "package_name",
"arguments": [
{"name": "input_filenames", "required": True},
{"name": "input_directory", "required": True},
{"name": "output_filenames", "required": True},
{"name": "output_directory", "required": True},
{"name": "concurrency", "required": True},
],
},
The game args should look like this:
[
# 1st container
{
"input_filenames": input_path_1,
"input_directory": input_directory,
"output_filenames": output_path_1,
"output_directory": output_directory,
"concurrency": cocurrency,
},
# 2nd container
{
"input_filenames": input_path_2,
"input_directory": input_directory,
"output_filenames": output_path_2,
"output_directory": output_directory,
"concurrency": cocurrency,
},
]
"""
def create_instance(
self,
instance_id: str,
game_name: str,
mpc_party: MPCParty,
num_workers: int,
server_ips: Optional[List[str]] = None,
game_args: Optional[List[Dict[str, Any]]] = None,
) -> MPCInstance:
self.logger.info(f"Creating MPC instance: {instance_id}")
instance = MPCInstance(
instance_id,
game_name,
mpc_party,
num_workers,
server_ips,
[],
MPCInstanceStatus.CREATED,
game_args,
)
self.instance_repository.create(instance)
return instance
def start_instance(
self,
instance_id: str,
output_files: Optional[List[str]] = None,
server_ips: Optional[List[str]] = None,
timeout: Optional[int] = None,
version: str = DEFAULT_BINARY_VERSION,
) -> MPCInstance:
return asyncio.run(
self.start_instance_async(
instance_id, output_files, server_ips, timeout, version
)
)
async def start_instance_async(
self,
instance_id: str,
output_files: Optional[List[str]] = None,
server_ips: Optional[List[str]] = None,
timeout: Optional[int] = None,
version: str = DEFAULT_BINARY_VERSION,
) -> MPCInstance:
"""To run a distributed MPC game
Keyword arguments:
instance_id -- unique id to identify the MPC instance
"""
instance = self.instance_repository.read(instance_id)
self.logger.info(f"Starting MPC instance: {instance_id}")
if instance.mpc_party is MPCParty.CLIENT and not server_ips:
raise ValueError("Missing server_ips")
# spin up containers
self.logger.info("Spinning up container instances")
game_args = instance.game_args
instance.containers = await self._spin_up_containers_onedocker(
instance.game_name,
instance.mpc_party,
instance.num_workers,
game_args,
server_ips,
timeout,
version,
)
if len(instance.containers) != instance.num_workers:
self.logger.warning(
f"Instance {instance_id} has {len(instance.containers)} containers spun up, but expecting {instance.num_workers} containers!"
)
if instance.mpc_party is MPCParty.SERVER:
ip_addresses = [
checked_cast(str, instance.ip_address)
for instance in instance.containers
]
instance.server_ips = ip_addresses
instance.status = MPCInstanceStatus.STARTED
self.instance_repository.update(instance)
return instance
def stop_instance(self, instance_id: str) -> MPCInstance:
instance = self.instance_repository.read(instance_id)
container_ids = [instance.instance_id for instance in instance.containers]
if container_ids:
errors = self.onedocker_svc.stop_containers(container_ids)
error_msg = list(filter(lambda _: _[1], zip(container_ids, errors)))
if error_msg:
self.logger.error(
f"We encountered errors when stopping containers: {error_msg}"
)
instance.status = MPCInstanceStatus.CANCELED
self.instance_repository.update(instance)
self.logger.info(f"MPC instance {instance_id} has been successfully canceled.")
return instance
def get_instance(self, instance_id: str) -> MPCInstance:
self.logger.info(f"Getting MPC instance: {instance_id}")
return self.instance_repository.read(instance_id)
def update_instance(self, instance_id: str) -> MPCInstance:
instance = self.instance_repository.read(instance_id)
self.logger.info(f"Updating MPC instance: {instance_id}")
if instance.status in [
MPCInstanceStatus.COMPLETED,
MPCInstanceStatus.FAILED,
MPCInstanceStatus.CANCELED,
]:
return instance
# skip if no containers registered under instance yet
if instance.containers:
instance.containers = self._update_container_instances(instance.containers)
if len(instance.containers) != instance.num_workers:
self.logger.warning(
f"Instance {instance_id} has {len(instance.containers)} containers after update, but expecting {instance.num_workers} containers!"
)
instance.status = self._get_instance_status(instance)
self.instance_repository.update(instance)
return instance
async def _spin_up_containers_onedocker(
self,
game_name: str,
mpc_party: MPCParty,
num_containers: int,
game_args: Optional[List[Dict[str, Any]]] = None,
ip_addresses: Optional[List[str]] = None,
timeout: Optional[int] = None,
version: str = DEFAULT_BINARY_VERSION,
) -> List[ContainerInstance]:
if game_args is not None and len(game_args) != num_containers:
raise ValueError(
"The number of containers is not consistent with the number of game argument dictionary."
)
if ip_addresses is not None and len(ip_addresses) != num_containers:
raise ValueError(
"The number of containers is not consistent with number of ip addresses."
)
cmd_tuple_list = []
for i in range(num_containers):
game_arg = game_args[i] if game_args is not None else {}
server_ip = ip_addresses[i] if ip_addresses is not None else None
cmd_tuple_list.append(
self.mpc_game_svc.build_onedocker_args(
game_name=game_name,
mpc_party=mpc_party,
server_ip=server_ip,
**game_arg,
)
)
cmd_args_list = [cmd_args for (package_name, cmd_args) in cmd_tuple_list]
return await self.onedocker_svc.start_containers_async(
task_definition=self.task_definition,
package_name=cmd_tuple_list[0][0],
version=version,
cmd_args_list=cmd_args_list,
timeout=timeout,
)
def _update_container_instances(
self, containers: List[ContainerInstance]
) -> List[ContainerInstance]:
ids = [container.instance_id for container in containers]
return list(filter(None, self.container_svc.get_instances(ids)))
def _get_instance_status(self, instance: MPCInstance) -> MPCInstanceStatus:
if instance.status is MPCInstanceStatus.CANCELED:
return instance.status
status = MPCInstanceStatus.COMPLETED
for container in instance.containers:
if container.status == ContainerInstanceStatus.FAILED:
return MPCInstanceStatus.FAILED
if container.status == ContainerInstanceStatus.UNKNOWN:
return MPCInstanceStatus.UNKNOWN
if container.status == ContainerInstanceStatus.STARTED:
status = MPCInstanceStatus.STARTED
return status
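# Usage sketch (not part of the original class): the container service,
# instance repository and MPC game service below are placeholders that a
# caller would supply from concrete fbpcp implementations, and "sample_game"
# is a made-up game name.
#
#     mpc_svc = MPCService(container_svc, instance_repo, "my-task-def", mpc_game_svc)
#     mpc_svc.create_instance("instance-1", "sample_game", MPCParty.SERVER, num_workers=2)
#     instance = mpc_svc.start_instance("instance-1")
#     instance = mpc_svc.update_instance("instance-1")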
|
the-stack_0_21939 | import random
import numpy as np
def read_data(pairs_file):
with open(pairs_file, 'r') as file:
tcrs = set()
peps = set()
all_pairs = []
for line in file:
tcr, pep = line.strip().split('\t')
# print(tcr, pep)
# Proper tcr and peptides
if '*' in tcr or '*' in pep:
continue
if '/' in pep:
continue
tcrs.add(tcr)
peps.add(pep)
all_pairs.append((tcr, pep))
train_pairs, test_pairs = train_test_split(all_pairs)
return all_pairs, train_pairs, test_pairs
def train_test_split(all_pairs):
train_pairs = []
test_pairs = []
for pair in all_pairs:
# 80% train, 20% test
p = np.random.binomial(1, 0.8)
if p == 1:
train_pairs.append(pair)
else:
test_pairs.append(pair)
return train_pairs, test_pairs
def positive_examples(pairs):
examples = []
for pair in pairs:
tcr, pep = pair
weight = 1
examples.append((tcr, pep, 'p', weight))
return examples
def negative_examples(pairs, all_pairs, size):
examples = []
i = 0
# Get tcr and peps lists
tcrs = [tcr for (tcr, pep) in pairs]
peps = [pep for (tcr, pep) in pairs]
while i < size:
pep = random.choice(peps)
for j in range(5):
tcr = random.choice(tcrs)
attach = (tcr, pep) in all_pairs
if attach is False:
weight = 1
if (tcr, pep, 'n', weight) not in examples:
examples.append((tcr, pep, 'n', weight))
i += 1
return examples
def get_examples(pairs_file):
all_pairs, train_pairs, test_pairs = read_data(pairs_file)
train_pos = positive_examples(train_pairs)
train_neg = negative_examples(train_pairs, all_pairs, len(train_pos))
test_pos = positive_examples(test_pairs)
test_neg = negative_examples(test_pairs, all_pairs, len(test_pos))
return train_pos, train_neg, test_pos, test_neg
def load_data(pairs_file):
train_pos, train_neg, test_pos, test_neg = get_examples(pairs_file)
train = train_pos + train_neg
random.shuffle(train)
test = test_pos + test_neg
random.shuffle(test)
return train, test
# def check():
# pairs_file = 'data/McPAS_pairs.txt'
# train, test = load_data(pairs_file)
# print(len(train), train)
# print(len(test), test)
#
# check()
|
the-stack_0_21940 | """
Copyright 2021 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging, kopf
from seeder_ccloud import utils
from seeder_ccloud.openstack.openstack_helper import OpenstackHelper
from swiftclient import client as swiftclient
from keystoneclient import exceptions
config = utils.Config()
@kopf.on.update(config.crd_info['plural'], annotations={'operatorVersion': config.operator_version}, field='spec.openstack.swifts')
@kopf.on.create(config.crd_info['plural'], annotations={'operatorVersion': config.operator_version}, field='spec.openstack.swifts')
def seed_swifts_handler(memo: kopf.Memo, new, old, name, annotations, **_):
logging.info('seeding {} swift containers'.format(name))
if not config.is_dependency_successful(annotations):
raise kopf.TemporaryError('error seeding {}: {}'.format(name, 'dependencies error'), delay=30)
try:
changed = utils.get_changed_seeds(old, new)
Swift(memo['args'], memo['dry_run']).seed(changed)
except Exception as error:
raise kopf.TemporaryError('error seeding {}: {}'.format(name, error), delay=30)
class Swift():
def __init__(self, args, dry_run=False):
self.openstack = OpenstackHelper(args)
self.dry_run = dry_run
def seed(self, swifts):
for swift in swifts:
self._seed_swift(swift)
def _seed_swift(self, swift):
"""
Seeds swift account and containers for a project
:param project:
:param swift:
:param args:
:param sess:
:return:
"""
if 'enabled' in swift and swift['enabled']:
logging.debug(
"seeding swift account for project %s" % swift['project'])
try:
project_id = self.openstack.get_project_id(swift['project'])
project_name = swift['project']
session = self.openstack.get_session()
service_token = session.get_token()
# poor mans storage-url generation
try:
swift_endpoint = session.get_endpoint(
service_type='object-store',
interface=self.openstack.args.interface)
except exceptions.EndpointNotFound:
swift_endpoint = session.get_endpoint(
service_type='object-store',
interface='admin')
storage_url = swift_endpoint.split('/AUTH_')[
0] + '/AUTH_' + project_id
# Create swiftclient Connection
conn = swiftclient.Connection(session=session,
preauthurl=storage_url,
preauthtoken=service_token,
insecure=True)
try:
# see if the account already exists
conn.head_account()
except swiftclient.ClientException:
# nope, go create it
logging.info(
'creating swift account for project %s' % project_name)
if not self.dry_run:
swiftclient.put_object(storage_url, token=service_token)
# seed swift containers
if 'containers' in swift:
self.seed_swift_containers(project_name, swift['containers'],
conn)
except Exception as e:
logging.error(
"could not seed swift account for project %s: %s" % (
project_name, e))
raise
def seed_swift_containers(self, project, containers, conn):
"""
Creates swift containers for a project
:param project:
:param containers:
:param conn:
:return:
"""
logging.debug(
"seeding swift containers for project %s" % project)
for container in containers:
try:
# prepare the container metadata
headers = {}
if 'metadata' in container:
for meta in list(container['metadata'].keys()):
header = 'x-container-%s' % meta
headers[header] = str(container['metadata'][meta])
try:
# see if the container already exists
result = conn.head_container(container['name'])
for header in list(headers.keys()):
if headers[header] != result.get(header, ''):
logging.info(
"%s differs. update container %s/%s" % (
header, project,
container['name']))
if not self.dry_run:
conn.post_container(container['name'], headers)
break
except swiftclient.ClientException:
# nope, go create it
logging.info(
'creating swift container %s/%s' % (
project, container['name']))
if not self.dry_run:
conn.put_container(container['name'], headers)
except Exception as e:
logging.error(
"could not seed swift container for project %s: %s" % (
project, e))
raise |
the-stack_0_21943 | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
This module contains the Scroll Flag panel
"""
# Standard library imports
from __future__ import division
from math import ceil
# Third party imports
from qtpy.QtCore import QSize, Qt, QTimer
from qtpy.QtGui import QPainter, QColor, QCursor
from qtpy.QtWidgets import (QStyle, QStyleOptionSlider, QApplication)
# Local imports
from spyder.api.panel import Panel
from spyder.plugins.completion.languageserver import DiagnosticSeverity
REFRESH_RATE = 1000
class ScrollFlagArea(Panel):
"""Source code editor's scroll flag area"""
WIDTH = 12
FLAGS_DX = 4
FLAGS_DY = 2
def __init__(self, editor):
Panel.__init__(self, editor)
self.setAttribute(Qt.WA_OpaquePaintEvent)
self.scrollable = True
self.setMouseTracking(True)
# Define some attributes to be used for unit testing.
self._unit_testing = False
self._range_indicator_is_visible = False
self._alt_key_is_down = False
# Define permanent Qt colors that are needed for painting the flags
# and the slider range.
self._facecolors = {
'warning': QColor(editor.warning_color),
'error': QColor(editor.error_color),
'todo': QColor(editor.todo_color),
'breakpoint': QColor(editor.breakpoint_color),
'occurrence': QColor(editor.occurrence_color),
'found_results': QColor(editor.found_results_color)
}
self._edgecolors = {key: color.darker(120) for
key, color in self._facecolors.items()}
self._slider_range_color = QColor(Qt.gray)
self._slider_range_color.setAlphaF(.85)
self._slider_range_brush = QColor(Qt.gray)
self._slider_range_brush.setAlphaF(.5)
editor.sig_focus_changed.connect(self.update)
editor.sig_key_pressed.connect(self.keyPressEvent)
editor.sig_key_released.connect(self.keyReleaseEvent)
editor.sig_alt_left_mouse_pressed.connect(self.mousePressEvent)
editor.sig_alt_mouse_moved.connect(self.mouseMoveEvent)
editor.sig_leave_out.connect(self.update)
editor.sig_flags_changed.connect(self.delayed_update_flags)
editor.sig_theme_colors_changed.connect(self.update_flag_colors)
self._update_list_timer = QTimer(self)
self._update_list_timer.setSingleShot(True)
self._update_list_timer.timeout.connect(self.update_flags)
self._todo_list = []
self._code_analysis_list = []
self._breakpoint_list = []
@property
def slider(self):
"""This property holds whether the vertical scrollbar is visible."""
return self.editor.verticalScrollBar().isVisible()
def sizeHint(self):
"""Override Qt method"""
return QSize(self.WIDTH, 0)
def update_flag_colors(self, color_dict):
"""
Update the permanent Qt colors that are used for painting the flags
and the slider range with the new colors defined in the given dict.
"""
for name, color in color_dict.items():
self._facecolors[name] = QColor(color)
self._edgecolors[name] = self._facecolors[name].darker(120)
def delayed_update_flags(self):
"""
This function is called every time a flag is changed.
        There is no need to update the flags thousands of times per second,
        as it is quite resource-heavy. This limits the calls to REFRESH_RATE.
"""
if self._update_list_timer.isActive():
return
self._update_list_timer.start(REFRESH_RATE)
def update_flags(self):
"""
Update flags list.
This parses the entire file, which can take a lot of time for
large files. Save all the flags in lists for painting during
paint events.
"""
self._todo_list = []
self._code_analysis_list = []
self._breakpoint_list = []
editor = self.editor
block = editor.document().firstBlock()
while block.isValid():
# Parse all lines in the file looking for something to flag.
data = block.userData()
if data:
if data.code_analysis:
self._code_analysis_list.append((block, data))
if data.todo:
self._todo_list.append((block, data))
if data.breakpoint:
self._breakpoint_list.append((block, data))
block = block.next()
self.update()
def paintEvent(self, event):
"""
Override Qt method.
Painting the scroll flag area
        There are two cases:
- The scroll bar is moving, in which case paint all flags.
- The scroll bar is not moving, only paint flags corresponding
to visible lines.
"""
# The area in which the slider handle of the scrollbar may move.
groove_rect = self.get_scrollbar_groove_rect()
# The scrollbar's scale factor ratio between pixel span height and
# value span height
scale_factor = groove_rect.height() / self.get_scrollbar_value_height()
# The vertical offset of the scroll flag area relative to the
# top of the text editor.
offset = groove_rect.y()
# Note that we calculate the pixel metrics required to draw the flags
# here instead of using the convenience methods of the ScrollFlagArea
# for performance reason.
rect_x = ceil(self.FLAGS_DX / 2)
rect_w = self.WIDTH - self.FLAGS_DX
rect_h = self.FLAGS_DY
# Fill the whole painting area
painter = QPainter(self)
painter.fillRect(event.rect(), self.editor.sideareas_color)
editor = self.editor
# Check if the slider is visible
paint_local = not bool(self.slider)
# Define compute_flag_ypos to position the flags:
if not paint_local:
# Paint flags for the entire document
last_line = editor.document().lastBlock().firstLineNumber()
# The 0.5 offset is used to align the flags with the center of
# their corresponding text edit block before scaling.
first_y_pos = self.value_to_position(
0.5, scale_factor, offset) - self.FLAGS_DY / 2
last_y_pos = self.value_to_position(
last_line + 0.5, scale_factor, offset) - self.FLAGS_DY / 2
def compute_flag_ypos(block):
line_number = block.firstLineNumber()
frac = line_number / last_line
pos = first_y_pos + frac * (last_y_pos - first_y_pos)
return ceil(pos)
else:
# Only paint flags for visible lines
visible_lines = [val[1] for val in editor.visible_blocks]
if not visible_lines:
# Nothing to do
return
min_line = min(visible_lines)
max_line = max(visible_lines)
def compute_flag_ypos(block):
# When the vertical scrollbar is not visible, the flags are
# vertically aligned with the center of their corresponding
# text block with no scaling.
top = editor.blockBoundingGeometry(block).translated(
editor.contentOffset()).top()
bottom = top + editor.blockBoundingRect(block).height()
middle = (top + bottom)/2
return ceil(middle-self.FLAGS_DY/2)
def should_paint_block(block):
"""Check if the block should be painted."""
if not block.isValid():
return False
# Don't paint local flags outside of the window
if paint_local and not (
min_line <= block.blockNumber() + 1 <= max_line):
return False
return True
# Paint all the code analysis flags
for block, data in self._code_analysis_list:
if not should_paint_block(block):
continue
# Paint the warnings
for source, code, severity, message in data.code_analysis:
error = severity == DiagnosticSeverity.ERROR
if error:
painter.setBrush(self._facecolors['error'])
painter.setPen(self._edgecolors['error'])
break
else:
painter.setBrush(self._facecolors['warning'])
painter.setPen(self._edgecolors['warning'])
rect_y = compute_flag_ypos(block)
painter.drawRect(rect_x, rect_y, rect_w, rect_h)
# Paint all the todo flags
for block, data in self._todo_list:
if not should_paint_block(block):
continue
# Paint the todos
rect_y = compute_flag_ypos(block)
painter.setBrush(self._facecolors['todo'])
painter.setPen(self._edgecolors['todo'])
painter.drawRect(rect_x, rect_y, rect_w, rect_h)
# Paint all the breakpoints flags
for block, data in self._breakpoint_list:
if not should_paint_block(block):
continue
# Paint the breakpoints
rect_y = compute_flag_ypos(block)
painter.setBrush(self._facecolors['breakpoint'])
painter.setPen(self._edgecolors['breakpoint'])
painter.drawRect(rect_x, rect_y, rect_w, rect_h)
# Paint the occurrences of selected word flags
if editor.occurrences:
painter.setBrush(self._facecolors['occurrence'])
painter.setPen(self._edgecolors['occurrence'])
for line_number in editor.occurrences:
if paint_local and not (
min_line <= line_number + 1 <= max_line):
continue
block = editor.document().findBlockByNumber(line_number)
rect_y = compute_flag_ypos(block)
painter.drawRect(rect_x, rect_y, rect_w, rect_h)
# Paint the found results flags
if editor.found_results:
painter.setBrush(self._facecolors['found_results'])
painter.setPen(self._edgecolors['found_results'])
for line_number in editor.found_results:
if paint_local and not (
min_line <= line_number + 1 <= max_line):
continue
block = editor.document().findBlockByNumber(line_number)
rect_y = compute_flag_ypos(block)
painter.drawRect(rect_x, rect_y, rect_w, rect_h)
# Paint the slider range
if not self._unit_testing:
alt = QApplication.queryKeyboardModifiers() & Qt.AltModifier
else:
alt = self._alt_key_is_down
if self.slider:
cursor_pos = self.mapFromGlobal(QCursor().pos())
is_over_self = self.rect().contains(cursor_pos)
is_over_editor = editor.rect().contains(
editor.mapFromGlobal(QCursor().pos()))
# We use QRect.contains instead of QWidget.underMouse method to
# determined if the cursor is over the editor or the flag scrollbar
# because the later gives a wrong result when a mouse button
# is pressed.
if is_over_self or (alt and is_over_editor):
painter.setPen(self._slider_range_color)
painter.setBrush(self._slider_range_brush)
x, y, width, height = self.make_slider_range(
cursor_pos, scale_factor, offset, groove_rect)
painter.drawRect(x, y, width, height)
self._range_indicator_is_visible = True
else:
self._range_indicator_is_visible = False
def enterEvent(self, event):
"""Override Qt method"""
self.update()
def leaveEvent(self, event):
"""Override Qt method"""
self.update()
def mouseMoveEvent(self, event):
"""Override Qt method"""
self.update()
def mousePressEvent(self, event):
"""Override Qt method"""
if self.slider and event.button() == Qt.LeftButton:
vsb = self.editor.verticalScrollBar()
value = self.position_to_value(event.pos().y())
vsb.setValue(int(value-vsb.pageStep()/2))
def keyReleaseEvent(self, event):
"""Override Qt method."""
if event.key() == Qt.Key_Alt:
self._alt_key_is_down = False
self.update()
def keyPressEvent(self, event):
"""Override Qt method"""
if event.key() == Qt.Key_Alt:
self._alt_key_is_down = True
self.update()
def get_vertical_offset(self):
"""
Return the vertical offset of the scroll flag area relative to the
top of the text editor.
"""
groove_rect = self.get_scrollbar_groove_rect()
return groove_rect.y()
def get_slider_min_height(self):
"""
Return the minimum height of the slider range based on that set for
the scroll bar's slider.
"""
return QApplication.instance().style().pixelMetric(
QStyle.PM_ScrollBarSliderMin)
def get_scrollbar_groove_rect(self):
"""Return the area in which the slider handle may move."""
vsb = self.editor.verticalScrollBar()
style = QApplication.instance().style()
opt = QStyleOptionSlider()
vsb.initStyleOption(opt)
# Get the area in which the slider handle may move.
groove_rect = style.subControlRect(
QStyle.CC_ScrollBar, opt, QStyle.SC_ScrollBarGroove, self)
return groove_rect
def get_scrollbar_position_height(self):
"""Return the pixel span height of the scrollbar area in which
the slider handle may move"""
groove_rect = self.get_scrollbar_groove_rect()
return float(groove_rect.height())
def get_scrollbar_value_height(self):
"""Return the value span height of the scrollbar"""
vsb = self.editor.verticalScrollBar()
return vsb.maximum() - vsb.minimum() + vsb.pageStep()
def get_scale_factor(self):
"""Return scrollbar's scale factor:
ratio between pixel span height and value span height"""
return (self.get_scrollbar_position_height() /
self.get_scrollbar_value_height())
def value_to_position(self, y, scale_factor, offset):
"""Convert value to position in pixels"""
vsb = self.editor.verticalScrollBar()
return int((y - vsb.minimum()) * scale_factor + offset)
def position_to_value(self, y):
"""Convert position in pixels to value"""
vsb = self.editor.verticalScrollBar()
offset = self.get_vertical_offset()
return vsb.minimum() + max([0, (y - offset) / self.get_scale_factor()])
def make_slider_range(self, cursor_pos, scale_factor, offset, groove_rect):
"""
Return the slider x and y positions and the slider width and height.
"""
# The slider range indicator position follows the mouse vertical
# position while its height corresponds to the part of the file that
# is currently visible on screen.
vsb = self.editor.verticalScrollBar()
slider_height = self.value_to_position(
vsb.pageStep(), scale_factor, offset) - offset
slider_height = max(slider_height, self.get_slider_min_height())
        # Compute the minimum and maximum y-values to constrain the slider
        # range indicator position to the height span of the scrollbar area
        # where the slider may move.
min_ypos = offset
max_ypos = groove_rect.height() + offset - slider_height
# Determine the bounded y-position of the slider rect.
slider_y = max(min_ypos, min(max_ypos,
ceil(cursor_pos.y()-slider_height/2)))
return 1, slider_y, self.WIDTH - 2, slider_height
def wheelEvent(self, event):
"""Override Qt method"""
self.editor.wheelEvent(event)
def set_enabled(self, state):
"""Toggle scroll flag area visibility"""
self.enabled = state
self.setVisible(state)
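# The value<->position conversions above are easier to follow with plain
# numbers. This is a hedged, standalone sketch; every figure below is a
# made-up assumption rather than a value taken from a real QScrollBar.
def _value_position_example(position_height=600.0, minimum=0, maximum=400,
                            page_step=100, offset=15):
    """Round-trip a scrollbar value through the pixel mapping used above."""
    value_height = maximum - minimum + page_step       # value span height
    scale_factor = position_height / value_height      # pixels per value unit
    def value_to_position(value):
        return int((value - minimum) * scale_factor + offset)
    def position_to_value(y):
        return minimum + max(0, (y - offset) / scale_factor)
    y = value_to_position(200)            # value 200 -> pixel row 255
    return y, position_to_value(y)        # -> (255, 200.0)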
|
the-stack_0_21945 | import argparse
import gzip
import sys
import itertools
'''
Filter a PubTator-style "offset" file (title/abstract pairs followed by
annotation lines) down to the abstracts matching the optional entity,
entity-pair and PubMed-id filters, optionally appending relation lines from a
separate file, and write the result to a new offset file.
'''
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--bio_pub', required=True, help='pubtator bio offset file')
parser.add_argument('-o', '--output_file', required=True, help='write results to this file')
parser.add_argument('-n', '--entity_filter', help='single column file of entities to keep')
parser.add_argument('-e', '--entity_pair_filter', help='2 column tsv of entity pairs to export')
parser.add_argument('-p', '--pubmed_filter', help='only export these pubmed ids')
parser.add_argument('-r', '--relation_file', help='4 col tsv containing relations to add to output. '
'[e1 \t e2 \t relation \t docid]')
args = parser.parse_args()
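# Example invocation (the script and data file names below are illustrative
# only; every filter flag is optional and may be omitted):
#   python filter_pubtator_offsets.py \
#       -b bioconcepts2pubtator_offsets.gz -o filtered_abstracts.gz \
#       -n entities.txt -p pubmed_ids.txt -r relations.tsv.gz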
current_annotations = []
current_pub = ''
line_num = 0
done = False
valid_annotations = 0
valid_pubs = 0
exported_annotations = 0
exported_abstracts = 0
total_abstracts = 0
print('Reading in filter files')
pubmed_filter, entity_filter, ep_filter, doc_relation_map = None, None, None, None
if args.pubmed_filter:
with open(args.pubmed_filter) as f:
pubmed_filter = set([l.strip() for l in f])
if args.entity_filter:
with open(args.entity_filter) as f:
entity_filter = set([l.strip() for l in f])
if args.entity_pair_filter:
with open(args.entity_pair_filter) as f:
ep_filter = set([(l.strip().split('\t')[0], l.strip().split('\t')[1]) for l in f])
if args.relation_file:
print('Reading in relation file %s' % args.relation_file)
with (gzip.open(args.relation_file, 'rb') if args.relation_file.endswith('gz')
else open(args.relation_file, 'r')) as rel_file:
doc_relation_map = {(doc_id, e1, e2): rel for e1, e2, rel, doc_id in [_l.strip().split() for _l in rel_file]}
with (gzip.open(args.output_file, 'wb') if args.output_file.endswith('.gz') else open(args.output_file, 'w')) as out_f:
with (gzip.open(args.bio_pub, 'rb') if args.bio_pub.endswith('.gz') else open(args.bio_pub, 'r')) as f:
for line in f:
if line_num == 0:
title = line
doc_id = title.split('|')[0]
abstract = f.readline()
line_num += 1
if line_num % 10000 == 0:
sys.stdout.write('\rline: %dK exported_annotations: %dK '
'exported_abstracts: %dK total_abstracts : %dK'
% (line_num/1000, exported_annotations/1000,
exported_abstracts/1000, total_abstracts/1000))
sys.stdout.flush()
# new pub
if len(line.strip()) == 0:
# do something with last annotations
if valid_annotations > 0:
valid_pubs += 1
replaced_text = []
last = 0
annotation_map = {}
entities_in_abstract = [_kg_id for _kg_id, _line in current_annotations]
if ep_filter:
matched_eps = [pair for pair in itertools.product(entities_in_abstract, repeat=2)
if pair in ep_filter]
if doc_relation_map:
matched_relations = set(['\t'.join([doc_id, doc_relation_map[(doc_id, e1, e2)], e1, e2])
for e1, e2 in itertools.product(entities_in_abstract, repeat=2)
if (doc_id, e1, e2) in doc_relation_map])
# if example matches the filters or there are no filters
if (not pubmed_filter or pub_id in pubmed_filter) \
and (not ep_filter or matched_eps) \
and (not doc_relation_map or matched_relations):
exported_abstracts += 1
# write sentences and annotations to file
out_str = '%s%s' % (title, abstract)
exported_annotations += len(current_annotations)
out_str += ''.join([_line for _kg_id, _line in current_annotations])
                        if doc_relation_map:
                            out_str += '\n'.join([_line for _line in matched_relations])
out_f.write(out_str + '\n\n')
total_abstracts += 1
# reset annotations for next pub
current_annotations = []
valid_annotations = 0
title = f.readline()
doc_id = title.split('|')[0]
abstract = f.readline()
else:
parts = line.strip().split('\t')
if len(parts) == 6:
pub_id, start, end, mention, label, kg_id = parts
kg_id = kg_id.replace('MESH:', '')
line = line.replace('MESH:', '')
current_annotations.append((kg_id, line))
if not entity_filter or kg_id in entity_filter:
valid_annotations += 1
print('Done') |
the-stack_0_21947 | import unittest
from utils import date_utils
from datetime import datetime
from enums.feed_enums import FeedType
from errors.custom_exceptions import InputDataError
class TestDateUtils(unittest.TestCase):
def test_get_formatted_date(self):
today_date = date_utils.get_formatted_date(FeedType.ITEM)
try:
datetime.strptime(today_date, '%Y%m%d')
except ValueError:
self.fail('Invalid date format: %s' % today_date)
def test_validate_date_exception(self):
with self.assertRaises(InputDataError):
date_utils.validate_date('2019/02/01', FeedType.ITEM)
def test_validate_date(self):
date_utils.validate_date('20190201', FeedType.ITEM)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_21948 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from azure.mgmt.core import ARMPipelineClient
from msrest import Serializer, Deserializer
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from ._configuration import DnsManagementClientConfiguration
class _SDKClient(object):
def __init__(self, *args, **kwargs):
"""This is a fake class to support current implemetation of MultiApiClientMixin."
Will be removed in final version of multiapi azure-core based client
"""
pass
class DnsManagementClient(MultiApiClientMixin, _SDKClient):
"""The DNS Management Client.
    This client contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Specifies the Azure subscription ID, which uniquely identifies the Microsoft Azure subscription.
:type subscription_id: str
:param str api_version: API version to use if no profile is provided, or if
missing in profile.
:param str base_url: Service URL
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
DEFAULT_API_VERSION = '2018-05-01'
_PROFILE_TAG = "azure.mgmt.dns.DnsManagementClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
}},
_PROFILE_TAG + " latest"
)
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
api_version=None,
base_url=None,
profile=KnownProfiles.default,
**kwargs # type: Any
):
if not base_url:
base_url = 'https://management.azure.com'
self._config = DnsManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
super(DnsManagementClient, self).__init__(
api_version=api_version,
profile=profile
)
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2016-04-01: :mod:`v2016_04_01.models<azure.mgmt.dns.v2016_04_01.models>`
* 2018-03-01-preview: :mod:`v2018_03_01_preview.models<azure.mgmt.dns.v2018_03_01_preview.models>`
* 2018-05-01: :mod:`v2018_05_01.models<azure.mgmt.dns.v2018_05_01.models>`
"""
if api_version == '2016-04-01':
from .v2016_04_01 import models
return models
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview import models
return models
elif api_version == '2018-05-01':
from .v2018_05_01 import models
return models
raise ValueError("API version {} is not available".format(api_version))
@property
def dns_resource_reference(self):
"""Instance depends on the API version:
* 2018-05-01: :class:`DnsResourceReferenceOperations<azure.mgmt.dns.v2018_05_01.operations.DnsResourceReferenceOperations>`
"""
api_version = self._get_api_version('dns_resource_reference')
if api_version == '2018-05-01':
from .v2018_05_01.operations import DnsResourceReferenceOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dns_resource_reference'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def record_sets(self):
"""Instance depends on the API version:
* 2016-04-01: :class:`RecordSetsOperations<azure.mgmt.dns.v2016_04_01.operations.RecordSetsOperations>`
* 2018-03-01-preview: :class:`RecordSetsOperations<azure.mgmt.dns.v2018_03_01_preview.operations.RecordSetsOperations>`
* 2018-05-01: :class:`RecordSetsOperations<azure.mgmt.dns.v2018_05_01.operations.RecordSetsOperations>`
"""
api_version = self._get_api_version('record_sets')
if api_version == '2016-04-01':
from .v2016_04_01.operations import RecordSetsOperations as OperationClass
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview.operations import RecordSetsOperations as OperationClass
elif api_version == '2018-05-01':
from .v2018_05_01.operations import RecordSetsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'record_sets'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def zones(self):
"""Instance depends on the API version:
* 2016-04-01: :class:`ZonesOperations<azure.mgmt.dns.v2016_04_01.operations.ZonesOperations>`
* 2018-03-01-preview: :class:`ZonesOperations<azure.mgmt.dns.v2018_03_01_preview.operations.ZonesOperations>`
* 2018-05-01: :class:`ZonesOperations<azure.mgmt.dns.v2018_05_01.operations.ZonesOperations>`
"""
api_version = self._get_api_version('zones')
if api_version == '2016-04-01':
from .v2016_04_01.operations import ZonesOperations as OperationClass
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview.operations import ZonesOperations as OperationClass
elif api_version == '2018-05-01':
from .v2018_05_01.operations import ZonesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'zones'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def close(self):
self._client.close()
def __enter__(self):
self._client.__enter__()
return self
def __exit__(self, *exc_details):
self._client.__exit__(*exc_details)
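# Hedged usage sketch (not part of the generated client): the credential type,
# the `azure-identity` dependency and the subscription id below are
# illustrative assumptions.
def _example_usage():  # pragma: no cover
    from azure.identity import DefaultAzureCredential
    client = DnsManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
        api_version="2018-05-01",
    )
    # The `zones` property resolves lazily to the 2018-05-01 ZonesOperations.
    for zone in client.zones.list():
        print(zone.name)
    client.close()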
|
the-stack_0_21949 | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Jigar Tarpara and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class WorkstationPriceUpdateTool(Document):
def get_workstation_list(self):
filter = {}
if self.department:
filter['department'] = self.department
doc = frappe.get_all("Workstation", filter, ["name","pni_rate"])
return doc
def validate(self):
for row in self.workstation_price:
workstation = frappe.get_doc("Workstation",row.workstation)
workstation.pni_rate = row.pni_rate
workstation.save()
self.workstation_price = []
self.department = ""
frappe.msgprint("Workstation Price Updated!")
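# Hedged usage sketch (assumes the tool is a Single DocType used inside a
# Frappe bench context; the 5% rate increase is purely illustrative):
def _example_bulk_update():
    tool = frappe.get_doc("Workstation Price Update Tool")
    for row in tool.get_workstation_list():
        tool.append("workstation_price", {
            "workstation": row["name"],
            "pni_rate": (row["pni_rate"] or 0) * 1.05,
        })
    tool.save()  # validate() above then writes the rates back to Workstation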
|
the-stack_0_21953 | # test mod_md acme terms-of-service handling
import pytest
from .md_env import MDTestEnv
@pytest.mark.skipif(condition=not MDTestEnv.has_acme_server(),
reason="no ACME test server configured")
class TestRegUpdate:
NAME1 = "greenbytes2.de"
NAME2 = "test-100.com"
@pytest.fixture(autouse=True, scope='function')
def _method_scope(self, env):
env.clear_store()
# add managed domains
domains = [
[self.NAME1, "www.greenbytes2.de", "mail.greenbytes2.de"],
[self.NAME2, "test-101.com", "test-102.com"]
]
for dns in domains:
env.a2md(["-a", env.acme_url, "add"] + dns)
def teardown_method(self, method):
print("teardown_method: %s" % method.__name__)
# test case: update domains
def test_110_000(self, env):
dns = ["foo.de", "bar.de"]
output1 = env.a2md(["-vvvv", "update", self.NAME1, "domains"] + dns).json['output']
assert len(output1) == 1
env.check_json_contains(output1[0], {
"name": self.NAME1,
"domains": dns,
"contacts": [],
"ca": {
"url": env.acme_url,
"proto": "ACME"
},
"state": env.MD_S_INCOMPLETE
})
assert env.a2md(["list"]).json['output'][0] == output1[0]
# test case: remove all domains
def test_110_001(self, env):
assert env.a2md(["update", self.NAME1, "domains"]).exit_code == 1
# test case: update domains with invalid DNS
@pytest.mark.parametrize("invalid_dns", [
"tld", "white sp.ace", "invalid.*.wildcard.com", "k\xc3ller.idn.com"
])
def test_110_002(self, env, invalid_dns):
assert env.a2md(["update", self.NAME1, "domains", invalid_dns]).exit_code == 1
# test case: update domains with overlapping DNS list
def test_110_003(self, env):
dns = [self.NAME1, self.NAME2]
assert env.a2md(["update", self.NAME1, "domains"] + dns).exit_code == 1
# test case: update with subdomains
def test_110_004(self, env):
dns = ["test-foo.com", "sub.test-foo.com"]
md = env.a2md(["update", self.NAME1, "domains"] + dns).json['output'][0]
assert md['name'] == self.NAME1
assert md['domains'] == dns
# test case: update domains with duplicates
def test_110_005(self, env):
dns = [self.NAME1, self.NAME1, self.NAME1]
md = env.a2md(["update", self.NAME1, "domains"] + dns).json['output'][0]
assert md['name'] == self.NAME1
assert md['domains'] == [self.NAME1]
    # test case: update domains with a punycode name
def test_110_006(self, env):
dns = [self.NAME1, "xn--kller-jua.punycode.de"]
md = env.a2md(["update", self.NAME1, "domains"] + dns).json['output'][0]
assert md['name'] == self.NAME1
assert md['domains'] == dns
# test case: update non-existing managed domain
def test_110_007(self, env):
assert env.a2md(["update", "test-foo.com", "domains", "test-foo.com"]).exit_code == 1
# test case: update domains with DNS wildcard
@pytest.mark.parametrize("wild_dns", [
"*.wildcard.com"
])
def test_110_008(self, env, wild_dns):
assert env.a2md(["update", self.NAME1, "domains", wild_dns]).exit_code == 0
# --------- update ca ---------
# test case: update CA URL
def test_110_100(self, env):
url = "http://localhost.com:9999"
output = env.a2md(["update", self.NAME1, "ca", url]).json['output']
assert len(output) == 1
env.check_json_contains(output[0], {
"name": self.NAME1,
"domains": [self.NAME1, "www.greenbytes2.de", "mail.greenbytes2.de"],
"contacts": [],
"ca": {
"url": url,
"proto": "ACME"
},
"state": env.MD_S_INCOMPLETE
})
# test case: update CA with invalid URL
@pytest.mark.parametrize("invalid_url", [
"no.schema/path", "http://white space/path", "http://bad.port:-1/path"
])
def test_110_101(self, env, invalid_url):
assert env.a2md(["update", self.NAME1, "ca", invalid_url]).exit_code == 1
# test case: update ca protocol
def test_110_102(self, env):
md = env.a2md(["update", self.NAME1, "ca", env.acme_url, "FOO"]).json['output'][0]
env.check_json_contains(md['ca'], {
"url": env.acme_url,
"proto": "FOO"
})
assert md['state'] == 1
# test case: update account ID
def test_110_200(self, env):
acc_id = "test.account.id"
output = env.a2md(["update", self.NAME1, "account", acc_id]).json['output']
assert len(output) == 1
env.check_json_contains(output[0], {
"name": self.NAME1,
"domains": [self.NAME1, "www.greenbytes2.de", "mail.greenbytes2.de"],
"contacts": [],
"ca": {
"account": acc_id,
"url": env.acme_url,
"proto": "ACME"
},
"state": env.MD_S_INCOMPLETE
})
# test case: remove account ID
def test_110_201(self, env):
assert env.a2md(["update", self.NAME1, "account", "test.account.id"]).exit_code == 0
md = env.a2md(["update", self.NAME1, "account"]).json['output'][0]
env.check_json_contains(md['ca'], {
"url": env.acme_url,
"proto": "ACME"
})
assert md['state'] == 1
# test case: change existing account ID
def test_110_202(self, env):
assert env.a2md(["update", self.NAME1, "account", "test.account.id"]).exit_code == 0
md = env.a2md(["update", self.NAME1, "account", "foo.test.com"]).json['output'][0]
env.check_json_contains(md['ca'], {
"account": "foo.test.com",
"url": env.acme_url,
"proto": "ACME"
})
assert md['state'] == 1
# test case: ignore additional argument
def test_110_203(self, env):
md = env.a2md(["update", self.NAME1, "account", "test.account.id",
"test2.account.id"]).json['output'][0]
env.check_json_contains(md['ca'], {
"account": "test.account.id",
"url": env.acme_url,
"proto": "ACME"
})
assert md['state'] == 1
# test case: add contact info
def test_110_300(self, env):
mail = "[email protected]"
output = env.a2md(["update", self.NAME1, "contacts", mail]).json['output']
assert len(output) == 1
env.check_json_contains(output[0], {
"name": self.NAME1,
"domains": [self.NAME1, "www.greenbytes2.de", "mail.greenbytes2.de"],
"contacts": ["mailto:" + mail],
"ca": {
"url": env.acme_url,
"proto": "ACME"
},
"state": env.MD_S_INCOMPLETE
})
# test case: add multiple contact info, preserve order
def test_110_301(self, env):
mail = ["[email protected]", "[email protected]"]
md = env.a2md(["update", self.NAME1, "contacts"] + mail).json['output'][0]
assert md['contacts'] == ["mailto:" + mail[0], "mailto:" + mail[1]]
assert md['state'] == 1
# test case: must not remove contact info
def test_110_302(self, env):
assert env.a2md(["update", self.NAME1, "contacts", "[email protected]"]).exit_code == 0
assert env.a2md(["update", self.NAME1, "contacts"]).exit_code == 1
# test case: replace existing contact info
def test_110_303(self, env):
assert env.a2md(["update", self.NAME1, "contacts", "[email protected]"]).exit_code == 0
md = env.a2md(["update", self.NAME1, "contacts", "[email protected]"]).json['output'][0]
assert md['contacts'] == ["mailto:[email protected]"]
assert md['state'] == 1
# test case: use invalid mail address
@pytest.mark.parametrize("invalid_mail", [
"no.at.char", "with [email protected]", "missing.host@", "@missing.localpart.de",
"[email protected]", "double@[email protected]"
])
def test_110_304(self, env, invalid_mail):
        # SEI: Hmm, it probably is not worthwhile to build a complete
        # verification of https://tools.ietf.org/html/rfc822 here, is it?
assert env.a2md(["update", self.NAME1, "contacts", invalid_mail]).exit_code == 1
# test case: respect urls as given
@pytest.mark.parametrize("url", [
"mailto:[email protected]", "wrong://[email protected]"])
def test_110_305(self, env, url):
md = env.a2md(["update", self.NAME1, "contacts", url]).json['output'][0]
assert md['contacts'] == [url]
assert md['state'] == 1
# test case: add tos agreement
def test_110_400(self, env):
output = env.a2md(["update", self.NAME1, "agreement", env.acme_tos]).json['output']
assert len(output) == 1
env.check_json_contains(output[0], {
"name": self.NAME1,
"domains": [self.NAME1, "www.greenbytes2.de", "mail.greenbytes2.de"],
"contacts": [],
"ca": {
"url": env.acme_url,
"proto": "ACME",
"agreement": env.acme_tos
},
"state": env.MD_S_INCOMPLETE
})
# test case: remove tos agreement
def test_110_402(self, env):
assert env.a2md(["update", self.NAME1, "agreement", env.acme_tos]).exit_code == 0
md = env.a2md(["update", self.NAME1, "agreement"]).json['output'][0]
env.check_json_contains(md['ca'], {
"url": env.acme_url,
"proto": "ACME"
})
assert md['state'] == 1
# test case: ignore additional arguments
def test_110_403(self, env):
md = env.a2md(["update", self.NAME1, "agreement",
env.acme_tos, "http://invalid.tos/"]).json['output'][0]
env.check_json_contains(md['ca'], {
"url": env.acme_url,
"proto": "ACME",
"agreement": env.acme_tos
})
assert md['state'] == 1
# test case: update agreement with invalid URL
@pytest.mark.parametrize("invalid_url", [
"no.schema/path", "http://white space/path", "http://bad.port:-1/path"
])
def test_110_404(self, env, invalid_url):
assert env.a2md(["update", self.NAME1, "agreement", invalid_url]).exit_code == 1
|
the-stack_0_21955 | # -*-coding:utf-8 -*-
"""
Created on 2015-05-20
@author: Danny<[email protected]>
DannyWork Project
"""
from __future__ import unicode_literals
from django import forms
from ckeditor.widgets import CKEditorWidget
from .models import BlogComment, Blog
class BlogForm(forms.ModelForm):
"""
    Blog form
"""
class Meta:
model = Blog
fields = ['title', 'theme', 'cate', 'topic', 'summary', 'content', 'tags', 'is_active']
widgets = {
'summary': CKEditorWidget()
}
class CommentForm(forms.Form):
"""
    Blog comment form
"""
content = forms.CharField(max_length=500, error_messages={
'required': '请输入您的留言',
'max_length': '请确保您的留言不超过500个字符'
})
username = forms.CharField(required=False, max_length=64, error_messages={
'max_length': '请确保您的称呼不超过64个字符'
})
email = forms.EmailField(error_messages={
'invalid': '请正确输入您的电子邮箱以方便与您取得联系',
'required': '请输入您的电子邮箱以方便与您取得联系'
})
def save(self):
return BlogComment.objects.create(blog=self.initial.get('blog'),
user=self.initial.get('user'),
ip_address=self.initial.get('ip_address'),
comment=self.cleaned_data.get('content'),
username=self.cleaned_data.get('username'),
email=self.cleaned_data.get('email'))
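# Hedged usage sketch of CommentForm inside a view (the `blog` object, the
# request attributes and the IP extraction are illustrative assumptions):
def _example_post_comment(request, blog):
    form = CommentForm(request.POST, initial={
        'blog': blog,
        'user': getattr(request, 'user', None),
        'ip_address': request.META.get('REMOTE_ADDR'),
    })
    if form.is_valid():
        return form.save()
    return form.errors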
|
the-stack_0_21956 | import os
import sys
sys.path.append('..')
sys.path.append('.')
import mitogen
VERSION = '%s.%s.%s' % mitogen.__version__
author = u'Network Genomics'
copyright = u'2019, Network Genomics'
exclude_patterns = ['_build', '.venv']
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinxcontrib.programoutput', 'domainrefs']
# get rid of version from <title>, it messes with piwik
html_title = 'Mitogen Documentation'
html_show_copyright = False
html_show_sourcelink = False
html_show_sphinx = False
html_sidebars = {'**': ['globaltoc.html', 'github.html']}
html_additional_pages = {'ansible': 'ansible.html'}
html_static_path = ['_static']
html_theme = 'alabaster'
html_theme_options = {
'font_family': "Georgia, serif",
'head_font_family': "Georgia, serif",
'fixed_sidebar': True,
'show_powered_by': False,
'pink_2': 'fffafaf',
'pink_1': '#fff0f0',
}
htmlhelp_basename = 'mitogendoc'
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
language = None
master_doc = 'toc'
project = u'Mitogen'
pygments_style = 'sphinx'
release = VERSION
source_suffix = '.rst'
templates_path = ['_templates']
todo_include_todos = False
version = VERSION
domainrefs = {
'gh:commit': {
'text': '%s',
'url': 'https://github.com/dw/mitogen/commit/%s',
},
'gh:issue': {
'text': '#%s',
'url': 'https://github.com/dw/mitogen/issues/%s',
},
'gh:pull': {
'text': '#%s',
'url': 'https://github.com/dw/mitogen/pull/%s',
},
'ans:mod': {
'text': '%s module',
'url': 'https://docs.ansible.com/ansible/latest/modules/%s_module.html',
},
'ans:conn': {
'text': '%s connection plug-in',
'url': 'https://docs.ansible.com/ansible/latest/plugins/connection/%s.html',
},
'freebsd:man2': {
'text': '%s(2)',
'url': 'https://www.freebsd.org/cgi/man.cgi?query=%s',
},
'linux:man1': {
'text': '%s(1)',
'url': 'http://man7.org/linux/man-pages/man1/%s.1.html',
},
'linux:man2': {
'text': '%s(2)',
'url': 'http://man7.org/linux/man-pages/man2/%s.2.html',
},
'linux:man3': {
'text': '%s(3)',
'url': 'http://man7.org/linux/man-pages/man3/%s.3.html',
},
'linux:man7': {
'text': '%s(7)',
'url': 'http://man7.org/linux/man-pages/man7/%s.7.html',
},
}
rst_epilog = """
.. |mitogen_version| replace:: %(VERSION)s
.. |mitogen_url| replace:: `mitogen-%(VERSION)s.tar.gz <https://networkgenomics.com/try/mitogen-%(VERSION)s.tar.gz>`__
""" % locals()
|
the-stack_0_21957 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from typing import Any, TYPE_CHECKING, Union
from azure.core.pipeline.policies import (
AsyncBearerTokenCredentialPolicy,
HttpLoggingPolicy,
)
from .._user_agent import USER_AGENT
from .._generated._generated_ledger.v0_1_preview.aio import (
ConfidentialLedgerClient as _ConfidentialLedgerClient,
)
from .._shared import ConfidentialLedgerCertificateCredential, DEFAULT_VERSION
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
class AsyncConfidentialLedgerClientBase(object):
def __init__(
self,
*,
endpoint: str,
credential: Union[ConfidentialLedgerCertificateCredential, "TokenCredential"],
ledger_certificate_path: str,
**kwargs: Any
) -> None:
client = kwargs.get("generated_client")
if client:
# caller provided a configured client -> nothing left to initialize
self._client = client
return
if not endpoint:
raise ValueError("Expected endpoint to be a non-empty string")
if not credential:
raise ValueError("Expected credential to not be None")
if not isinstance(ledger_certificate_path, str):
raise TypeError("ledger_certificate_path must be a string")
if ledger_certificate_path == "":
raise ValueError(
"If not None, ledger_certificate_path must be a non-empty string"
)
endpoint = endpoint.strip(" /")
try:
if not endpoint.startswith("https://"):
self._endpoint = "https://" + endpoint
else:
self._endpoint = endpoint
except AttributeError:
raise ValueError("Confidential Ledger URL must be a string.")
self.api_version = kwargs.pop("api_version", DEFAULT_VERSION)
if not kwargs.get("transport", None):
# Customize the transport layer to use client certificate authentication and validate
# a self-signed TLS certificate.
if isinstance(credential, ConfidentialLedgerCertificateCredential):
# The async version of the client seems to expect a sequence of filenames.
# azure/core/pipeline/transport/_aiohttp.py:163
# > ssl_ctx.load_cert_chain(*cert)
kwargs["connection_cert"] = (credential.certificate_path,)
kwargs["connection_verify"] = ledger_certificate_path
http_logging_policy = HttpLoggingPolicy(**kwargs)
http_logging_policy.allowed_header_names.update(
{
"x-ms-keyvault-network-info",
"x-ms-keyvault-region",
"x-ms-keyvault-service-version",
}
)
if not isinstance(credential, ConfidentialLedgerCertificateCredential):
kwargs["authentication_policy"] = kwargs.pop(
"authentication_policy",
AsyncBearerTokenCredentialPolicy(
credential,
"https://confidential-ledger.azure.com/.default",
**kwargs
),
)
try:
self._client = _ConfidentialLedgerClient(
self._endpoint,
api_version=self.api_version,
http_logging_policy=http_logging_policy,
sdk_moniker=USER_AGENT,
**kwargs
)
except NotImplementedError:
raise NotImplementedError(
"This package doesn't support API version '{}'. ".format(
self.api_version
)
+ "Supported versions: 0.1-preview"
)
@property
def endpoint(self) -> str:
"""The URL this client is connected to."""
return self._endpoint
async def __aenter__(self) -> "AsyncConfidentialLedgerClientBase":
await self._client.__aenter__()
return self
async def __aexit__(self, *args: Any) -> None:
await self._client.__aexit__(*args)
async def close(self) -> None:
"""Close sockets opened by the client.
Calling this method is unnecessary when using the client as a context manager.
"""
await self._client.close()
|
the-stack_0_21958 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup, find_packages
with open("README.md", "r", encoding="utf8") as fh:
long_description = fh.read()
setup(
name="modin",
version="0.4.0",
description="Modin: Make your pandas code run faster by changing one line of code.",
packages=find_packages(),
url="https://github.com/modin-project/modin",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=["pandas==0.24.1", "ray==0.6.2", "numpy<=1.15.0", "typing"],
extras_require={
# can be installed by pip install modin[dask]
"dask": ["dask==1.0.0", "distributed==1.25.0"],
        # can be installed by pip install modin[out_of_core]
"out_of_core": ["psutil==5.4.8"],
},
)
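# Illustrative install commands for the optional extras declared above:
#   pip install modin[dask]          # adds the Dask execution engine
#   pip install modin[out_of_core]   # enables out-of-core support via psutil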
|
the-stack_0_21959 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualWAN(Resource):
"""VirtualWAN Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param disable_vpn_encryption: Vpn encryption to be disabled or not.
:type disable_vpn_encryption: bool
:ivar virtual_hubs: List of VirtualHubs in the VirtualWAN.
:vartype virtual_hubs:
list[~azure.mgmt.network.v2018_04_01.models.SubResource]
:ivar vpn_sites:
:vartype vpn_sites:
list[~azure.mgmt.network.v2018_04_01.models.SubResource]
:param provisioning_state: The provisioning state of the resource.
Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
:type provisioning_state: str or
~azure.mgmt.network.v2018_04_01.models.ProvisioningState
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_hubs': {'readonly': True},
'vpn_sites': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'disable_vpn_encryption': {'key': 'properties.disableVpnEncryption', 'type': 'bool'},
'virtual_hubs': {'key': 'properties.virtualHubs', 'type': '[SubResource]'},
'vpn_sites': {'key': 'properties.vpnSites', 'type': '[SubResource]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualWAN, self).__init__(**kwargs)
self.disable_vpn_encryption = kwargs.get('disable_vpn_encryption', None)
self.virtual_hubs = None
self.vpn_sites = None
self.provisioning_state = kwargs.get('provisioning_state', None)
self.etag = None
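# Hedged usage sketch (location and tag values are placeholders): callers set
# only the writable attributes; read-only fields such as `virtual_hubs`,
# `vpn_sites` and `etag` are populated by the service.
def _example_virtual_wan():
    return VirtualWAN(
        location="westus",
        tags={"env": "test"},
        disable_vpn_encryption=False,
    )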
|
the-stack_0_21960 | import dash_html_components as html
from dash.dependencies import Output, Input
import pages.rocket_builder.rocket_builder_page as rb
from app import app
from conversions import metric_convert
inputs = {
'number of fins': {'unit': '', 'default_value': 4, 'input_prefix': '-', 'si_prefix': '-'},
'root chord': {'unit': 'cm', 'default_value': 5, 'input_prefix': 'c', 'si_prefix': '-'},
'tip chord': {'unit': 'cm', 'default_value': 2, 'input_prefix': 'c', 'si_prefix': '-'},
'fin height': {'unit': 'cm', 'default_value': 4.5, 'input_prefix': 'c', 'si_prefix': '-'},
'sweep length': {'unit': 'cm', 'default_value': 1.5, 'input_prefix': 'c', 'si_prefix': '-'}
}
def get_layout(data):
layout = [html.H3('Fins')]
layout.extend([rb.simple_input(i,
metric_convert(data[i.replace(' ', '_')],
inputs[i]['si_prefix'],
inputs[i]['input_prefix']),
inputs[i]['unit'])
for i in inputs
if i != 'sweep length'])
sweep_length = 'sweep length'
layout.append(rb.simple_input(sweep_length,
metric_convert(data[sweep_length.replace(' ', '_')],
inputs[sweep_length]['si_prefix'],
inputs[sweep_length]['input_prefix']),
inputs[sweep_length]['unit'],
min=-10 ** 9))
return layout
@app.callback(
Output('fin-builder-data', 'data'),
Input('number-of-fins-input', 'value'),
Input('root-chord-input', 'value'),
Input('tip-chord-input', 'value'),
Input('fin-height-input', 'value'),
Input('sweep-length-input', 'value')
)
def save_data(number_of_fins: int, root_chord: float, tip_chord: float, fin_height: float, sweep_length: float):
return {
'number_of_fins': round(
metric_convert(number_of_fins,
inputs['number of fins']['input_prefix'],
inputs['number of fins']['si_prefix'])),
'root_chord': round(
metric_convert(root_chord,
inputs['root chord']['input_prefix'],
inputs['root chord']['si_prefix']),
4),
'tip_chord': round(
metric_convert(tip_chord,
inputs['tip chord']['input_prefix'],
inputs['tip chord']['si_prefix']),
4),
'fin_height': round(
metric_convert(fin_height,
inputs['fin height']['input_prefix'],
inputs['fin height']['si_prefix']),
4),
'sweep_length': round(
metric_convert(sweep_length,
inputs['sweep length']['input_prefix'],
inputs['sweep length']['si_prefix']),
4)
}
def init_data(data):
if 'number_of_fins' not in data.keys():
data['number_of_fins'] = rb.convert_default_input('number of fins', inputs)
if 'root_chord' not in data.keys():
data['root_chord'] = rb.convert_default_input('root chord', inputs)
if 'sweep_length' not in data.keys():
data['sweep_length'] = rb.convert_default_input('sweep length', inputs)
if 'tip_chord' not in data.keys():
data['tip_chord'] = rb.convert_default_input('tip chord', inputs)
if 'fin_height' not in data.keys():
data['fin_height'] = rb.convert_default_input('fin height', inputs)
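# Hedged illustration of the prefix handling above, assuming `metric_convert`
# maps a value between SI prefixes ('c' for centi, '-' for no prefix):
def _example_prefix_round_trip():
    stored_si = metric_convert(5, 'c', '-')            # 5 cm entered -> 0.05 m
    shown_again = metric_convert(stored_si, '-', 'c')  # 0.05 m -> 5 cm shown
    return stored_si, shown_again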
|
the-stack_0_21961 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import itertools
import numpy as np
import pytest
from sklearn.utils._testing import assert_array_almost_equal
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet
from sklearn.covariance import fast_mcd
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def test_fast_mcd_on_invalid_input():
X = np.arange(100)
msg = 'Expected 2D array, got 1D array instead'
with pytest.raises(ValueError, match=msg):
fast_mcd(X)
def test_mcd_class_on_invalid_input():
X = np.arange(100)
mcd = MinCovDet()
msg = 'Expected 2D array, got 1D array instead'
with pytest.raises(ValueError, match=msg):
mcd.fit(X)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_mcd_issue3367():
# Check that MCD completes when the covariance matrix is singular
# i.e. one of the rows and columns are all zeros
rand_gen = np.random.RandomState(0)
# Think of these as the values for X and Y -> 10 values between -5 and 5
data_values = np.linspace(-5, 5, 10).tolist()
# Get the cartesian product of all possible coordinate pairs from above set
data = np.array(list(itertools.product(data_values, data_values)))
# Add a third column that's all zeros to make our data a set of point
# within a plane, which means that the covariance matrix will be singular
data = np.hstack((data, np.zeros((data.shape[0], 1))))
# The below line of code should raise an exception if the covariance matrix
# is singular. As a further test, since we have points in XYZ, the
    # principal components (Eigenvectors) of these directly relate to the
# geometry of the points. Since it's a plane, we should be able to test
# that the Eigenvector that corresponds to the smallest Eigenvalue is the
# plane normal, specifically [0, 0, 1], since everything is in the XY plane
# (as I've set it up above). To do this one would start by:
#
# evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
# normal = evecs[:, np.argmin(evals)]
#
# After which we need to assert that our `normal` is equal to [0, 0, 1].
# Do note that there is floating point error associated with this, so it's
# best to subtract the two and then compare some small tolerance (e.g.
# 1e-12).
MinCovDet(random_state=rand_gen).fit(data)
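# Hedged sketch of the eigenvector check outlined in the comment above, kept
# out of the test itself; the 1e-12 tolerance is the figure suggested there.
def _plane_normal_check_example():
    rand_gen = np.random.RandomState(0)
    data_values = np.linspace(-5, 5, 10).tolist()
    data = np.array(list(itertools.product(data_values, data_values)))
    data = np.hstack((data, np.zeros((data.shape[0], 1))))
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
    normal = evecs[:, np.argmin(evals)]
    # The smallest-eigenvalue eigenvector should be the XY-plane normal.
    return np.allclose(np.abs(normal), [0.0, 0.0, 1.0], atol=1e-12)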
def test_mcd_support_covariance_is_zero():
# Check that MCD returns a ValueError with informative message when the
# covariance of the support data is equal to 0.
X_1 = np.array([0.5, 0.1, 0.1, 0.1, 0.957, 0.1, 0.1, 0.1, 0.4285, 0.1])
X_1 = X_1.reshape(-1, 1)
X_2 = np.array([0.5, 0.3, 0.3, 0.3, 0.957, 0.3, 0.3, 0.3, 0.4285, 0.3])
X_2 = X_2.reshape(-1, 1)
msg = ('The covariance matrix of the support data is equal to 0, try to '
'increase support_fraction')
for X in [X_1, X_2]:
with pytest.raises(ValueError, match=msg):
MinCovDet().fit(X)
def test_mcd_increasing_det_warning():
# Check that a warning is raised if we observe increasing determinants
# during the c_step. In theory the sequence of determinants should be
# decreasing. Increasing determinants are likely due to ill-conditioned
# covariance matrices that result in poor precision matrices.
X = [[5.1, 3.5, 1.4, 0.2],
[4.9, 3.0, 1.4, 0.2],
[4.7, 3.2, 1.3, 0.2],
[4.6, 3.1, 1.5, 0.2],
[5.0, 3.6, 1.4, 0.2],
[4.6, 3.4, 1.4, 0.3],
[5.0, 3.4, 1.5, 0.2],
[4.4, 2.9, 1.4, 0.2],
[4.9, 3.1, 1.5, 0.1],
[5.4, 3.7, 1.5, 0.2],
[4.8, 3.4, 1.6, 0.2],
[4.8, 3.0, 1.4, 0.1],
[4.3, 3.0, 1.1, 0.1],
[5.1, 3.5, 1.4, 0.3],
[5.7, 3.8, 1.7, 0.3],
[5.4, 3.4, 1.7, 0.2],
[4.6, 3.6, 1.0, 0.2],
[5.0, 3.0, 1.6, 0.2],
[5.2, 3.5, 1.5, 0.2]]
mcd = MinCovDet(random_state=1)
warn_msg = "Determinant has increased"
with pytest.warns(RuntimeWarning, match=warn_msg):
mcd.fit(X)
|
the-stack_0_21963 | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration tests for Issue with IssueTracker integration."""
import mock
import ddt
from ggrc import db
from ggrc import models
from ggrc import settings
from ggrc.integrations import integrations_errors
from ggrc.integrations.synchronization_jobs.issue_sync_job import \
ISSUE_STATUS_MAPPING
from ggrc.models import all_models
from ggrc.models.hooks.issue_tracker import integration_utils
from ggrc.models.hooks.issue_tracker import issue_integration
from ggrc.models.hooks.issue_tracker import issue_tracker_params_builder \
as params_builder
from integration import ggrc
from integration.ggrc import api_helper
from integration.ggrc_basic_permissions.models \
import factories as rbac_factories
from integration.ggrc.models import factories
TICKET_ID = 123
@ddt.ddt
class TestIssueIntegration(ggrc.TestCase):
"""Test set for IssueTracker integration functionality."""
DEFAULT_ISSUE_ATTRS = {
"title": "title1",
"context": None,
"status": "Draft",
"enabled": True,
"component_id": 1234,
"hotlist_id": 4321,
"issue_id": TICKET_ID,
"issue_type": "Default Issue Type",
"issue_priority": "P2",
"issue_severity": "S1",
"due_date": "05/16/2018"
}
DEFAULT_TICKET_ATTRS = {
"component_id": 1234,
"hotlist_id": 4321,
"issue_id": TICKET_ID,
"status": "new",
"issue_type": "Default Issue type",
"issue_priority": "P1",
"issue_severity": "S2",
"title": "test title",
"verifier": "[email protected]",
"assignee": "[email protected]",
"ccs": ["[email protected]"],
}
def _request_payload_builder(self, issue_attrs):
"""Build payload for POST request to Issue Tracker"""
payload_attrs = dict(self.DEFAULT_ISSUE_ATTRS, **issue_attrs)
payload = {"issue": {
"title": payload_attrs["title"],
"context": payload_attrs["context"],
"status": payload_attrs["status"],
"due_date": payload_attrs["due_date"],
"issue_tracker": {
"enabled": payload_attrs["enabled"],
"component_id": payload_attrs["component_id"],
"hotlist_id": payload_attrs["hotlist_id"],
"issue_id": payload_attrs["issue_id"],
"issue_type": payload_attrs["issue_type"],
"issue_priority": payload_attrs["issue_priority"],
"issue_severity": payload_attrs["issue_severity"],
"title": payload_attrs["title"],
}
}}
return payload
def _put_request_payload_builder(self, issue_attrs):
"""Build payload for PUT request to Issue Tracker"""
payload_attrs = dict(self.DEFAULT_ISSUE_ATTRS, **issue_attrs)
payload = {
"issue_tracker": {
"enabled": payload_attrs["enabled"],
"component_id": payload_attrs["component_id"],
"hotlist_id": payload_attrs["hotlist_id"],
"issue_id": payload_attrs["issue_id"],
"issue_type": payload_attrs["issue_type"],
"issue_priority": payload_attrs["issue_priority"],
"issue_severity": payload_attrs["issue_severity"],
"title": payload_attrs["title"],
}
}
return payload
def _response_payload_builder(self, ticket_attrs):
"""Build payload for response from Issue Tracker via get_issue method"""
payload_attrs = dict(self.DEFAULT_TICKET_ATTRS, **ticket_attrs)
payload = {"issueState": {
"component_id": payload_attrs["component_id"],
"hotlist_ids": [payload_attrs["hotlist_id"], ],
"issue_id": payload_attrs["issue_id"],
"status": payload_attrs["status"],
"issue_type": payload_attrs["issue_type"],
"issue_priority": payload_attrs["issue_priority"],
"issue_severity": payload_attrs["issue_severity"],
"title": payload_attrs["title"],
"verifier": payload_attrs["verifier"],
"assignee": payload_attrs["assignee"],
"ccs": payload_attrs["ccs"],
}}
return payload
def _check_iti_fields(self,
obj,
issue_tracker_issue,
issue_attrs,
issue_tracker_ticket_attrs):
"""Checks issuetracker_issue were updated correctly.
    Make assertions to check that issue tracker fields were updated according
    to our business logic.
For Issue model we should get title, component_id, hotlist_id,
priority, severity, issue_id and issue_type from GGRC and status from
Issue Tracker.
"""
self.assertTrue(issue_tracker_issue.enabled)
# According to our business logic these attributes should be taken
# from issue information
self.assertEqual(issue_tracker_issue.title,
issue_attrs["issue"]["title"])
self.assertEqual(int(issue_tracker_issue.component_id),
issue_attrs["issue"]["issue_tracker"]["component_id"])
self.assertEqual(int(issue_tracker_issue.hotlist_id),
issue_attrs["issue"]["issue_tracker"]["hotlist_id"])
self.assertEqual(issue_tracker_issue.issue_priority,
issue_attrs["issue"]["issue_tracker"]["issue_priority"])
self.assertEqual(issue_tracker_issue.issue_severity,
issue_attrs["issue"]["issue_tracker"]["issue_severity"])
self.assertEqual(int(issue_tracker_issue.issue_id),
issue_attrs["issue"]["issue_tracker"]["issue_id"])
self.assertEqual(issue_tracker_issue.issue_type,
issue_attrs["issue"]["issue_tracker"]["issue_type"])
# These attributes should be taken from ticket information
ticket_status = issue_tracker_ticket_attrs["issueState"]["status"]
ticket_mapped_status = ISSUE_STATUS_MAPPING[ticket_status]
self.assertEqual(obj.status, ticket_mapped_status)
def setUp(self):
super(TestIssueIntegration, self).setUp()
self.api = api_helper.Api()
self.client.get("/login")
@mock.patch("ggrc.integrations.issues.Client.create_issue",
return_value={"issueId": "issueId"})
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_create_issue_tracker_info(self, mock_create_issue):
"""Test creation issue tracker issue for Issue object."""
component_id = "1234"
hotlist_id = "4321"
issue_type = "Default Issue type"
issue_priority = "P2"
issue_severity = "S1"
title = "test title"
with mock.patch.object(integration_utils, "exclude_auditor_emails",
return_value={u"[email protected]", }):
response = self.api.post(all_models.Issue, {
"issue": {
"title": title,
"context": None,
"issue_tracker": {
"enabled": True,
"component_id": int(component_id),
"hotlist_id": int(hotlist_id),
"issue_type": issue_type,
"issue_priority": issue_priority,
"issue_severity": issue_severity,
},
"due_date": "10/10/2019"
},
})
mock_create_issue.assert_called_once()
self.assertEqual(response.status_code, 201)
issue_id = response.json.get("issue").get("id")
issue_tracker_issue = models.IssuetrackerIssue.get_issue("Issue",
issue_id)
self.assertTrue(issue_tracker_issue.enabled)
self.assertEqual(issue_tracker_issue.title, title)
self.assertEqual(issue_tracker_issue.component_id, component_id)
self.assertEqual(issue_tracker_issue.hotlist_id, hotlist_id)
self.assertEqual(issue_tracker_issue.issue_type, issue_type)
self.assertEqual(issue_tracker_issue.issue_priority, issue_priority)
self.assertEqual(issue_tracker_issue.issue_severity, issue_severity)
def test_exclude_auditor(self):
"""Test 'exclude_auditor_emails' util."""
audit = factories.AuditFactory()
person = factories.PersonFactory(email="[email protected]")
audit.add_person_with_role_name(person, "Auditors")
db.session.commit()
result = integration_utils.exclude_auditor_emails(["[email protected]",
"[email protected]"])
self.assertEqual(result, {"[email protected]", })
@ddt.data(
({"description": "new description"},
{"comment": "Issue Description has been updated.\nnew description"}),
({"test_plan": "new test plan"},
{"comment": "Issue Remediation Plan has been updated.\nnew test plan"}),
({"issue_tracker": {"component_id": "123",
"enabled": True,
"issue_id": TICKET_ID}},
{"component_id": 123}),
({"issue_tracker": {"hotlist_id": "321",
"enabled": True,
"issue_id": TICKET_ID}},
{"hotlist_ids": [321, ]}),
({"issue_tracker": {"issue_priority": "P2",
"enabled": True,
"issue_id": TICKET_ID}},
{"priority": "P2"}),
({"issue_tracker": {"issue_severity": "S2",
"enabled": True,
"issue_id": TICKET_ID}},
{"severity": "S2"}),
({"issue_tracker": {"enabled": False,
"hotlist_ids": [999, ],
"issue_id": TICKET_ID}},
{"comment": "Changes to this GGRC object will no longer be "
"tracked within this bug."}),
({"issue_tracker": {"title": "test_iti_title",
"enabled": True,
"issue_id": TICKET_ID}},
{"title": "test_iti_title"}),
)
@ddt.unpack
@mock.patch("ggrc.integrations.issues.Client.update_issue")
def test_update_issue(self, issue_attrs, expected_query, mock_update_issue):
"""Test updating issue tracker issue."""
iti = factories.IssueTrackerIssueFactory(
enabled=True,
issue_id=TICKET_ID,
issue_tracked_obj=factories.IssueFactory()
)
with mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True):
self.api.put(iti.issue_tracked_obj, issue_attrs)
mock_update_issue.assert_called_with(iti.issue_id, expected_query)
@ddt.data(
{"notes": "new notes"},
{"end_date": "2018-07-15"},
{"start_date": "2018-07-15"},
)
@mock.patch("ggrc.integrations.issues.Client.update_issue")
def test_update_untracked_fields(self, issue_attrs, mock_update_issue):
"""Test updating issue with fields which shouldn't be sync."""
iti = factories.IssueTrackerIssueFactory(
enabled=True,
issue_tracked_obj=factories.IssueFactory()
)
with mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True):
self.api.put(iti.issue_tracked_obj, issue_attrs)
mock_update_issue.assert_not_called()
@mock.patch("ggrc.integrations.issues.Client.update_issue")
def test_issue_tracker_error(self, update_issue_mock):
"""Test issue tracker errors.
    The issue in Issue Tracker doesn't change state
    when an error is received.
"""
iti = factories.IssueTrackerIssueFactory(
enabled=True,
issue_tracked_obj=factories.IssueFactory()
)
update_issue_mock.side_effect = integrations_errors.HttpError("data")
issue_attrs = {
"issue_tracker": {
"enabled": True,
"hotlist_id": "123",
"issue_id": iti.issue_id,
}
}
with mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True),\
mock.patch.object(all_models.IssuetrackerIssue,
"create_or_update_from_dict") as update_info_mock:
self.api.put(iti.issue_tracked_obj, issue_attrs)
# Check that "enabled" flag hasn't been changed.
self.assertTrue("enabled" not in update_info_mock.call_args[0][1])
@mock.patch("ggrc.integrations.issues.Client.update_issue")
def test_delete_issue(self, mock_update_issue):
"""Test updating issue tracker issue when issue in GGRC has been deleted"""
iti = factories.IssueTrackerIssueFactory(
enabled=True,
issue_tracked_obj=factories.IssueFactory()
)
expected_query = {"comment": "GGRC object has been deleted. GGRC changes "
"will no longer be tracked within this bug."}
with mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True):
self.api.delete(iti.issue_tracked_obj)
mock_update_issue.assert_called_with(iti.issue_id, expected_query)
@ddt.data("test comment",
" \n\ntest comment\n\n"
" \n\n \n\n")
@mock.patch.object(params_builder.BaseIssueTrackerParamsBuilder,
"get_ggrc_object_url",
return_value="http://issue_url.com")
@mock.patch("ggrc.integrations.issues.Client.update_issue")
def test_adding_comment_to_issue(self, desc, update_issue_mock,
url_builder_mock):
"""Test adding comment to issue."""
role = all_models.Role.query.filter(
all_models.Role.name == "Administrator"
).one()
with factories.single_commit():
client_user = factories.PersonFactory(name="Test User")
rbac_factories.UserRoleFactory(role=role, person=client_user)
self.api.set_user(client_user)
self.client.get("/login")
iti = factories.IssueTrackerIssueFactory(
enabled=True,
issue_tracked_obj=factories.IssueFactory()
)
comment = factories.CommentFactory(description=desc)
builder_class = params_builder.BaseIssueTrackerParamsBuilder
expected_result = {
"comment":
builder_class.COMMENT_TMPL.format(
author=client_user.name,
comment="test comment",
model="Issue",
link="http://issue_url.com",
)
}
with mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True):
self.api.post(all_models.Relationship, {
"relationship": {
"source": {"id": iti.issue_tracked_obj.id, "type": "Issue"},
"destination": {"id": comment.id, "type": "comment"},
"context": None
},
})
url_builder_mock.assert_called_once()
update_issue_mock.assert_called_with(iti.issue_id, expected_result)
@mock.patch("ggrc.integrations.issues.Client.update_issue")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_mapping_document(self, update_issue_mock):
"""Test map document action on issue.
Issue in Issue tracker shouldn't be updated when reference url has been
added to issue.
"""
iti = factories.IssueTrackerIssueFactory(
enabled=True,
issue_tracked_obj=factories.IssueFactory()
)
document = factories.DocumentFactory()
response = self.api.put(
iti.issue_tracked_obj,
{
"actions": {
"add_related": [{"id": document.id, "type": "Document", }, ]
}
}
)
self.assert200(response)
relationship = all_models.Relationship.query.filter(
all_models.Relationship.source_type == "Issue",
all_models.Relationship.source_id == response.json["issue"]["id"],
).order_by(all_models.Relationship.id.desc()).first()
self.assertEqual(relationship.destination_id, document.id)
self.assertEqual(relationship.source_id, iti.issue_tracked_obj.id)
# Check that issue in Issue Tracker hasn't been updated.
update_issue_mock.assert_not_called()
def test_prepare_update_json(self):
"""Test prepare_update_json method for Issue."""
with factories.single_commit():
issue = factories.IssueFactory()
iti = factories.IssueTrackerIssueFactory(
enabled=True,
issue_tracked_obj=issue,
title='title',
component_id=123,
hotlist_id=321,
issue_type="PROCESS",
issue_priority="P3",
issue_severity="S3",
)
without_info = issue_integration.prepare_issue_update_json(issue)
issue_info = issue.issue_tracker
with_info = issue_integration.prepare_issue_update_json(issue, issue_info)
expected_info = {
'component_id': 123,
'severity': u'S3',
'title': iti.title,
'hotlist_ids': [321, ],
'priority': u'P3',
'type': u'PROCESS',
}
self.assertEqual(expected_info, with_info)
self.assertEqual(without_info, with_info)
@ddt.ddt
class TestIssueLink(TestIssueIntegration):
"""Test linking functionality."""
@ddt.data(
({"title": "first_title"}, {"title": "other_title"}),
({"issue_type": "type1"}, {"issue_type": "process"}),
({"issue_severity": "S0"}, {"issue_severity": "S1"}),
({"issue_priority": "P0"}, {"issue_priority": "P1"}),
({"hotlist_id": 1234}, {"hotlist_id": 4321}),
({"component_id": 1234}, {"component_id": 4321}),
({"status": "Draft"}, {"status": "fixed"}),
)
@ddt.unpack
@mock.patch("ggrc.integrations.issues.Client.update_issue")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_new_issue_linking(self, issue_attrs, ticket_attrs, update_mock):
"""Test linking new Issue to IssueTracker ticket sets correct fields"""
issue_request_payload = self._request_payload_builder(issue_attrs)
response_payload = self._response_payload_builder(ticket_attrs)
with mock.patch("ggrc.integrations.issues.Client.get_issue",
return_value=response_payload) as get_mock:
with mock.patch.object(integration_utils, "exclude_auditor_emails",
return_value={u"[email protected]", }):
response = self.api.post(all_models.Issue, issue_request_payload)
get_mock.assert_called_once()
update_mock.assert_called_once()
self.assertEqual(response.status_code, 201)
issue_id = response.json.get("issue").get("id")
issue_tracker_issue = models.IssuetrackerIssue.get_issue("Issue", issue_id)
issue = all_models.Issue.query.filter_by(id=issue_id).first()
self._check_iti_fields(issue,
issue_tracker_issue,
issue_request_payload,
response_payload)
@mock.patch("ggrc.integrations.issues.Client.update_issue")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_people_merge_after_linking(self, update_mock):
"""Test people roles were updated while linking new ticket"""
ticket_attrs = {
"verifier": "[email protected]",
"assignee": "[email protected]",
"ccs": ["[email protected]", "[email protected]"],
}
with factories.single_commit():
factories.PersonFactory(email="[email protected]")
factories.PersonFactory(email="[email protected]")
for email in ["[email protected]", "[email protected]"]:
factories.PersonFactory(email=email)
issue_request_payload = self._request_payload_builder({})
response_payload = self._response_payload_builder(ticket_attrs)
with mock.patch("ggrc.integrations.issues.Client.get_issue",
return_value=response_payload) as get_mock:
with mock.patch.object(integration_utils, "exclude_auditor_emails",
return_value={u"[email protected]", }):
response = self.api.post(all_models.Issue, issue_request_payload)
get_mock.assert_called_once()
update_mock.assert_called_once()
self.assertEqual(response.status_code, 201)
issue_id = response.json.get("issue").get("id")
issue = all_models.Issue.query.filter_by(id=issue_id).first()
admins = [person.email for person
in issue.get_persons_for_rolename("Admin")]
primary = [person.email for person
in issue.get_persons_for_rolename("Primary Contacts")]
secondary = [person.email for person
in issue.get_persons_for_rolename("Secondary Contacts")]
# assert ticket roles were added to Issue
self.assertIn("[email protected]", admins)
self.assertIn("[email protected]", primary)
for person in ["[email protected]", "[email protected]"]:
self.assertIn(person, secondary)
@mock.patch("ggrc.integrations.issues.Client.update_issue")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_existing_issue_link(self, update_mock):
"""Test Issue link to another ticket """
iti = factories.IssueTrackerIssueFactory(
enabled=True,
issue_id=TICKET_ID,
issue_tracked_obj=factories.IssueFactory()
)
new_ticket_id = TICKET_ID + 1
new_data = {"issue_id": new_ticket_id}
issue_request_payload = self._put_request_payload_builder(new_data)
response_payload = self._response_payload_builder(new_data)
with mock.patch("ggrc.integrations.issues.Client.get_issue",
return_value=response_payload) as get_mock:
with mock.patch.object(integration_utils, "exclude_auditor_emails",
return_value={u"[email protected]", }):
response = self.api.put(iti.issue_tracked_obj, issue_request_payload)
get_mock.assert_called_once()
self.assert200(response)
# check if data was changed in our DB
issue_id = response.json.get("issue").get("id")
issue_tracker_issue = models.IssuetrackerIssue.get_issue("Issue", issue_id)
self.assertEqual(int(issue_tracker_issue.issue_id), new_ticket_id)
# check detach comment was sent
detach_comment_template = params_builder.IssueParamsBuilder.DETACH_TMPL
comment = detach_comment_template.format(new_ticket_id=new_ticket_id)
expected_args = (TICKET_ID, {"status": "OBSOLETE", "comment": comment})
self.assertEqual(expected_args, update_mock.call_args[0])
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_already_linked_ticket(self):
"""Test Issue without IT couldn't be linked to already linked ticket"""
with factories.single_commit():
factories.IssueTrackerIssueFactory(
enabled=True,
issue_id=TICKET_ID,
issue_tracked_obj=factories.IssueFactory()
)
new_issue = factories.IssueFactory()
issue_data = {"issue_id": TICKET_ID}
issue_request_payload = self._put_request_payload_builder(issue_data)
response = self.api.put(new_issue, issue_request_payload)
self.assert200(response)
self.assertTrue(response.json["issue"]["issue_tracker"]["_warnings"])
issue_id = response.json.get("issue").get("id")
issue_tracker_issue = models.IssuetrackerIssue.get_issue("Issue", issue_id)
self.assertFalse(issue_tracker_issue)
@mock.patch("ggrc.integrations.issues.Client.update_issue")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_link_issue_without_hotlist(self, update_mock):
"""Test hotlist populates from ticket if user haven't specified it."""
issue_request_payload = self._request_payload_builder({"hotlist_id": ""})
response_payload = self._response_payload_builder({"hotlist_id": 4321})
with mock.patch("ggrc.integrations.issues.Client.get_issue",
return_value=response_payload) as get_mock:
with mock.patch.object(integration_utils, "exclude_auditor_emails",
return_value={u"[email protected]", }):
response = self.api.post(all_models.Issue, issue_request_payload)
get_mock.assert_called_once()
update_mock.assert_called_once()
self.assertEqual(response.status_code, 201)
issue_id = response.json.get("issue").get("id")
issue_tracker_issue = models.IssuetrackerIssue.get_issue("Issue", issue_id)
self.assertEqual(int(issue_tracker_issue.hotlist_id), 4321)
@mock.patch("ggrc.integrations.issues.Client.update_issue")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_new_ticket_for_issue(self, update_mock):
"""Test create new ticket for already linked issue"""
iti = factories.IssueTrackerIssueFactory(
enabled=True,
issue_id=TICKET_ID,
issue_tracked_obj=factories.IssueFactory()
)
new_data = {"issue_id": ''}
issue_request_payload = self._put_request_payload_builder(new_data)
with mock.patch.object(integration_utils, "exclude_auditor_emails",
return_value={u"[email protected]", }):
with mock.patch("ggrc.integrations.issues.Client.create_issue",
return_value={"issueId": TICKET_ID + 1}) as create_mock:
response = self.api.put(iti.issue_tracked_obj, issue_request_payload)
self.assert200(response)
# Detach comment should be sent to previous ticket
update_mock.assert_called_once()
self.assertEqual(TICKET_ID, update_mock.call_args[0][0])
create_mock.assert_called_once()
# check if data was changed in our DB
issue_id = response.json.get("issue").get("id")
issue_tracker_issue = models.IssuetrackerIssue.get_issue("Issue", issue_id)
self.assertNotEqual(int(issue_tracker_issue.issue_id), TICKET_ID)
@ddt.data("Primary Contacts", "Admin", "Secondary Contacts")
@mock.patch('ggrc.settings.INTEGRATION_SERVICE_URL', new='mock')
def test_create_missed_issue_acl(self, role):
"""Test create_missed_issue_acl method"""
test_email = "[email protected]"
issue = factories.IssueFactory()
issue_integration.create_missed_issue_acl(test_email, role, issue)
db.session.commit()
person = all_models.Person.query.filter_by(email=test_email).one()
role_emails = [
p.email for p in issue.get_persons_for_rolename(role)
]
self.assertIn(person.email, role_emails)
@ddt.data("Primary Contacts", "Admin", "Secondary Contacts")
@mock.patch('ggrc.utils.user_generator.find_user', return_value=None)
def test_invalid_person_was_skipped(self, role, find_mock):
"""Invalid users should be skipped"""
test_email = "[email protected]"
issue = factories.IssueFactory()
issue_integration.create_missed_issue_acl(test_email, role, issue)
db.session.commit()
people = all_models.Person.query.filter_by(email=test_email).all()
self.assertFalse(people)
find_mock.assert_called_once()
@ddt.ddt
class TestDisabledIssueIntegration(ggrc.TestCase):
"""Tests for IssueTracker integration functionality with disabled sync."""
def setUp(self):
super(TestDisabledIssueIntegration, self).setUp()
self.api = api_helper.Api()
self.client.get("/login")
@mock.patch("ggrc.integrations.issues.Client.create_issue")
def test_issue_creation(self, mock_create_issue):
"""Test creating Issue object with disabled integration."""
with mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True):
response = self.api.post(all_models.Issue, {
"issue": {
"title": "test title",
"context": None,
"issue_tracker": {
"enabled": False,
},
"due_date": "10/10/2019"
},
})
mock_create_issue.assert_not_called()
self.assertEqual(response.status_code, 201)
issue_id = response.json.get("issue").get("id")
issue_tracker_issue = models.IssuetrackerIssue.get_issue("Issue",
issue_id)
self.assertIsNone(issue_tracker_issue)
@mock.patch("ggrc.integrations.issues.Client.update_issue")
def test_issue_deletion(self, mock_update_issue):
"""Test deleting Issue object with disabled integration for issue."""
iti = factories.IssueTrackerIssueFactory(
enabled=False,
issue_tracked_obj=factories.IssueFactory()
)
with mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True):
self.api.delete(iti.issue_tracked_obj)
mock_update_issue.assert_not_called()
@ddt.data(
{"description": "new description",
"issue_tracker": {"issue_id": TICKET_ID, "enabled": False}},
{"test_plan": "new test plan",
"issue_tracker": {"issue_id": TICKET_ID, "enabled": False}},
{"issue_tracker": {"issue_id": TICKET_ID,
"component_id": "123",
"enabled": False}},
{"issue_tracker": {"issue_id": TICKET_ID,
"hotlist_id": "321",
"enabled": False}},
{"issue_tracker": {"issue_id": TICKET_ID,
"issue_priority": "P2",
"enabled": False}},
{"issue_tracker": {"issue_id": TICKET_ID,
"issue_severity": "S2",
"enabled": False}},
{"issue_tracker": {"issue_id": TICKET_ID,
"title": "title1",
"enabled": False}},
)
@mock.patch("ggrc.integrations.issues.Client.update_issue")
def test_update_issue_object(self, issue_attrs, mock_update_issue):
"""Test updating issue object with disabled integration for issue."""
iti = factories.IssueTrackerIssueFactory(
enabled=False,
issue_id=TICKET_ID,
issue_tracked_obj=factories.IssueFactory()
)
with mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True):
self.api.put(iti.issue_tracked_obj, issue_attrs)
mock_update_issue.assert_not_called()
@mock.patch("ggrc.integrations.issues.Client.create_issue",
side_effect=[integrations_errors.Error, {"issueId": "issueId"}])
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_issue_recreation(self, _):
"""Test retrying to turn on integration after failed creation."""
# Arrange data.
component_id = "1234"
hotlist_id = "4321"
issue_type = "Default Issue type"
issue_priority = "P2"
issue_severity = "S1"
title = "test title"
issue_tracker_attrs = {
"enabled": True,
"component_id": int(component_id),
"hotlist_id": int(hotlist_id),
"issue_type": issue_type,
"issue_priority": issue_priority,
"issue_severity": issue_severity,
}
# Perform actions and assert results.
with mock.patch.object(integration_utils, "exclude_auditor_emails",
return_value={u"[email protected]", }):
# Try to create issue. create_issue should raise exception here.
response = self.api.post(all_models.Issue, {
"issue": {
"title": title,
"context": None,
"issue_tracker": issue_tracker_attrs,
"due_date": "10/10/2019"
},
})
issue_id = response.json.get("issue").get("id")
issue_tracker_issue = models.IssuetrackerIssue.get_issue("Issue",
issue_id)
self.assertIsNone(issue_tracker_issue.issue_id)
self.assertIsNone(issue_tracker_issue.issue_url)
# Try to turn on integration on already created issue.
self.api.put(
issue_tracker_issue.issue_tracked_obj,
{"issue_tracker": issue_tracker_attrs}
)
issue_id = issue_tracker_issue.issue_tracked_obj.id
issue_tracker_issue = models.IssuetrackerIssue.get_issue("Issue",
issue_id)
self.assertEqual(issue_tracker_issue.issue_url, "http://issue/issueId")
@mock.patch("ggrc.integrations.issues.Client.update_issue")
def test_adding_comment_to_issue(self, update_issue_mock):
"""Test not adding comment to issue when issue tracker disabled."""
iti = factories.IssueTrackerIssueFactory(
enabled=False,
issue_tracked_obj=factories.IssueFactory()
)
comment = factories.CommentFactory(description="test comment")
with mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True):
self.api.post(all_models.Relationship, {
"relationship": {
"source": {"id": iti.issue_tracked_obj.id, "type": "Issue"},
"destination": {"id": comment.id, "type": "comment"},
"context": None
},
})
update_issue_mock.assert_not_called()
|
the-stack_0_21964 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystoneclient.contrib.ec2 import utils as ec2_utils
from keystoneclient import exceptions as client_exceptions
from keystone import config
from keystone import tests
from keystone.tests import test_keystoneclient
CONF = config.CONF
class ClientDrivenSqlTestCase(test_keystoneclient.ClientDrivenTestCase):
def config_files(self):
config_files = super(ClientDrivenSqlTestCase, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
return config_files
def setUp(self):
super(ClientDrivenSqlTestCase, self).setUp()
self.default_client = self.get_client()
self.addCleanup(self.cleanup_instance('default_client'))
def test_endpoint_crud(self):
client = self.get_client(admin=True)
service = client.services.create(name=uuid.uuid4().hex,
service_type=uuid.uuid4().hex,
description=uuid.uuid4().hex)
endpoint_region = uuid.uuid4().hex
invalid_service_id = uuid.uuid4().hex
endpoint_publicurl = uuid.uuid4().hex
endpoint_internalurl = uuid.uuid4().hex
endpoint_adminurl = uuid.uuid4().hex
# a non-existent service ID should trigger a 404
self.assertRaises(client_exceptions.NotFound,
client.endpoints.create,
region=endpoint_region,
service_id=invalid_service_id,
publicurl=endpoint_publicurl,
adminurl=endpoint_adminurl,
internalurl=endpoint_internalurl)
endpoint = client.endpoints.create(region=endpoint_region,
service_id=service.id,
publicurl=endpoint_publicurl,
adminurl=endpoint_adminurl,
internalurl=endpoint_internalurl)
self.assertEqual(endpoint.region, endpoint_region)
self.assertEqual(endpoint.service_id, service.id)
self.assertEqual(endpoint.publicurl, endpoint_publicurl)
self.assertEqual(endpoint.internalurl, endpoint_internalurl)
self.assertEqual(endpoint.adminurl, endpoint_adminurl)
client.endpoints.delete(id=endpoint.id)
self.assertRaises(client_exceptions.NotFound, client.endpoints.delete,
id=endpoint.id)
def _send_ec2_auth_request(self, credentials, client=None):
if not client:
client = self.default_client
url = '%s/ec2tokens' % self.default_client.auth_url
(resp, token) = client.request(
url=url, method='POST',
body={'credentials': credentials})
return resp, token
def _generate_default_user_ec2_credentials(self):
        cred = self.default_client.ec2.create(
user_id=self.user_foo['id'],
tenant_id=self.tenant_bar['id'])
return self._generate_user_ec2_credentials(cred.access, cred.secret)
def _generate_user_ec2_credentials(self, access, secret):
signer = ec2_utils.Ec2Signer(secret)
credentials = {'params': {'SignatureVersion': '2'},
'access': access,
'verb': 'GET',
'host': 'localhost',
'path': '/service/cloud'}
signature = signer.generate(credentials)
return credentials, signature
def test_ec2_auth_success(self):
credentials, signature = self._generate_default_user_ec2_credentials()
credentials['signature'] = signature
resp, token = self._send_ec2_auth_request(credentials)
self.assertEqual(resp.status_code, 200)
self.assertIn('access', token)
def test_ec2_auth_success_trust(self):
# Add "other" role user_foo and create trust delegating it to user_two
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_other['id'])
trust_id = 'atrust123'
trust = {'trustor_user_id': self.user_foo['id'],
'trustee_user_id': self.user_two['id'],
'project_id': self.tenant_bar['id'],
'impersonation': True}
roles = [self.role_other]
self.trust_api.create_trust(trust_id, trust, roles)
# Create a client for user_two, scoped to the trust
client = self.get_client(self.user_two)
ret = client.authenticate(trust_id=trust_id,
tenant_id=self.tenant_bar['id'])
self.assertTrue(ret)
self.assertTrue(client.auth_ref.trust_scoped)
self.assertEqual(trust_id, client.auth_ref.trust_id)
# Create an ec2 keypair using the trust client impersonating user_foo
cred = client.ec2.create(user_id=self.user_foo['id'],
tenant_id=self.tenant_bar['id'])
credentials, signature = self._generate_user_ec2_credentials(
cred.access, cred.secret)
credentials['signature'] = signature
resp, token = self._send_ec2_auth_request(credentials)
self.assertEqual(resp.status_code, 200)
self.assertEqual(trust_id, token['access']['trust']['id'])
#TODO(shardy) we really want to check the roles and trustee
# but because of where the stubbing happens we don't seem to
# hit the necessary code in controllers.py _authenticate_token
        # so although all is OK via a real request, it is incorrect in
        # this test.
def test_ec2_auth_failure(self):
credentials, signature = self._generate_default_user_ec2_credentials()
credentials['signature'] = uuid.uuid4().hex
self.assertRaises(client_exceptions.Unauthorized,
self._send_ec2_auth_request,
credentials)
def test_ec2_credential_crud(self):
creds = self.default_client.ec2.list(user_id=self.user_foo['id'])
self.assertEqual(creds, [])
cred = self.default_client.ec2.create(user_id=self.user_foo['id'],
tenant_id=self.tenant_bar['id'])
creds = self.default_client.ec2.list(user_id=self.user_foo['id'])
self.assertEqual(creds, [cred])
got = self.default_client.ec2.get(user_id=self.user_foo['id'],
access=cred.access)
self.assertEqual(cred, got)
self.default_client.ec2.delete(user_id=self.user_foo['id'],
access=cred.access)
creds = self.default_client.ec2.list(user_id=self.user_foo['id'])
self.assertEqual(creds, [])
def test_ec2_credential_crud_non_admin(self):
na_client = self.get_client(self.user_two)
creds = na_client.ec2.list(user_id=self.user_two['id'])
self.assertEqual(creds, [])
cred = na_client.ec2.create(user_id=self.user_two['id'],
tenant_id=self.tenant_baz['id'])
creds = na_client.ec2.list(user_id=self.user_two['id'])
self.assertEqual(creds, [cred])
got = na_client.ec2.get(user_id=self.user_two['id'],
access=cred.access)
self.assertEqual(cred, got)
na_client.ec2.delete(user_id=self.user_two['id'],
access=cred.access)
creds = na_client.ec2.list(user_id=self.user_two['id'])
self.assertEqual(creds, [])
def test_ec2_list_credentials(self):
cred_1 = self.default_client.ec2.create(
user_id=self.user_foo['id'],
tenant_id=self.tenant_bar['id'])
cred_2 = self.default_client.ec2.create(
user_id=self.user_foo['id'],
tenant_id=self.tenant_service['id'])
cred_3 = self.default_client.ec2.create(
user_id=self.user_foo['id'],
tenant_id=self.tenant_mtu['id'])
two = self.get_client(self.user_two)
cred_4 = two.ec2.create(user_id=self.user_two['id'],
tenant_id=self.tenant_bar['id'])
creds = self.default_client.ec2.list(user_id=self.user_foo['id'])
self.assertEqual(len(creds), 3)
self.assertEqual(sorted([cred_1, cred_2, cred_3],
key=lambda x: x.access),
sorted(creds, key=lambda x: x.access))
self.assertNotIn(cred_4, creds)
def test_ec2_credentials_create_404(self):
self.assertRaises(client_exceptions.NotFound,
self.default_client.ec2.create,
user_id=uuid.uuid4().hex,
tenant_id=self.tenant_bar['id'])
self.assertRaises(client_exceptions.NotFound,
self.default_client.ec2.create,
user_id=self.user_foo['id'],
tenant_id=uuid.uuid4().hex)
def test_ec2_credentials_delete_404(self):
self.assertRaises(client_exceptions.NotFound,
self.default_client.ec2.delete,
user_id=uuid.uuid4().hex,
access=uuid.uuid4().hex)
def test_ec2_credentials_get_404(self):
self.assertRaises(client_exceptions.NotFound,
self.default_client.ec2.get,
user_id=uuid.uuid4().hex,
access=uuid.uuid4().hex)
def test_ec2_credentials_list_404(self):
self.assertRaises(client_exceptions.NotFound,
self.default_client.ec2.list,
user_id=uuid.uuid4().hex)
def test_ec2_credentials_list_user_forbidden(self):
two = self.get_client(self.user_two)
self.assertRaises(client_exceptions.Forbidden, two.ec2.list,
user_id=self.user_foo['id'])
def test_ec2_credentials_get_user_forbidden(self):
cred = self.default_client.ec2.create(user_id=self.user_foo['id'],
tenant_id=self.tenant_bar['id'])
two = self.get_client(self.user_two)
self.assertRaises(client_exceptions.Forbidden, two.ec2.get,
user_id=self.user_foo['id'], access=cred.access)
self.default_client.ec2.delete(user_id=self.user_foo['id'],
access=cred.access)
def test_ec2_credentials_delete_user_forbidden(self):
cred = self.default_client.ec2.create(user_id=self.user_foo['id'],
tenant_id=self.tenant_bar['id'])
two = self.get_client(self.user_two)
self.assertRaises(client_exceptions.Forbidden, two.ec2.delete,
user_id=self.user_foo['id'], access=cred.access)
self.default_client.ec2.delete(user_id=self.user_foo['id'],
access=cred.access)
def test_endpoint_create_404(self):
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.endpoints.create,
region=uuid.uuid4().hex,
service_id=uuid.uuid4().hex,
publicurl=uuid.uuid4().hex,
adminurl=uuid.uuid4().hex,
internalurl=uuid.uuid4().hex)
def test_endpoint_delete_404(self):
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.endpoints.delete,
id=uuid.uuid4().hex)
def test_policy_crud(self):
# FIXME(dolph): this test was written prior to the v3 implementation of
# the client and essentially refers to a non-existent
# policy manager in the v2 client. this test needs to be
# moved to a test suite running against the v3 api
self.skipTest('Written prior to v3 client; needs refactor')
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
policy_blob = uuid.uuid4().hex
policy_type = uuid.uuid4().hex
service = client.services.create(
name=uuid.uuid4().hex,
service_type=uuid.uuid4().hex,
description=uuid.uuid4().hex)
endpoint = client.endpoints.create(
service_id=service.id,
region=uuid.uuid4().hex,
adminurl=uuid.uuid4().hex,
internalurl=uuid.uuid4().hex,
publicurl=uuid.uuid4().hex)
# create
policy = client.policies.create(
blob=policy_blob,
type=policy_type,
endpoint=endpoint.id)
self.assertEqual(policy_blob, policy.policy)
self.assertEqual(policy_type, policy.type)
self.assertEqual(endpoint.id, policy.endpoint_id)
policy = client.policies.get(policy=policy.id)
self.assertEqual(policy_blob, policy.policy)
self.assertEqual(policy_type, policy.type)
self.assertEqual(endpoint.id, policy.endpoint_id)
endpoints = [x for x in client.endpoints.list() if x.id == endpoint.id]
endpoint = endpoints[0]
self.assertEqual(policy_blob, policy.policy)
self.assertEqual(policy_type, policy.type)
self.assertEqual(endpoint.id, policy.endpoint_id)
# update
policy_blob = uuid.uuid4().hex
policy_type = uuid.uuid4().hex
endpoint = client.endpoints.create(
service_id=service.id,
region=uuid.uuid4().hex,
adminurl=uuid.uuid4().hex,
internalurl=uuid.uuid4().hex,
publicurl=uuid.uuid4().hex)
policy = client.policies.update(
policy=policy.id,
blob=policy_blob,
type=policy_type,
endpoint=endpoint.id)
policy = client.policies.get(policy=policy.id)
self.assertEqual(policy_blob, policy.policy)
self.assertEqual(policy_type, policy.type)
self.assertEqual(endpoint.id, policy.endpoint_id)
# delete
client.policies.delete(policy=policy.id)
self.assertRaises(
client_exceptions.NotFound,
client.policies.get,
policy=policy.id)
policies = [x for x in client.policies.list() if x.id == policy.id]
self.assertEqual(len(policies), 0)
|
the-stack_0_21966 | import requests
import json
import numpy as np
import pandas as pd
from requests import Session
from requests_futures.sessions import FuturesSession
from concurrent.futures import ThreadPoolExecutor
from datetime import date
class Stock:
def __init__(self, token, version, ticker, period, num_periods):
"""
Class for stock data.
Parameters
----------
token : str
API token for IEX Cloud.
version : str
API version. Can be "stable" or "test".
ticker : str
Stock ticker or symbol. (eg. "MSFT").
period : str
Period intervals for financial data. Can be "annual" or "quarterly"
num_periods : int
The number of historical periods.
Attributes
----------
token : str
API token for IEX Cloud.
version : str
API version. Can be "stable" or "test".
ticker : str
Stock ticker or symbol. (eg. "MSFT").
period : str
Period intervals for financial data. Can be "annual" or "quarterly"
num_periods : int
The number of historical periods.
api_requests: tuple(str, str)
Tuple of API endpoints for iex_company and iex_fundamentals.
iex_company : pd.DataFrame
DataFrame of IEX Cloud's company data for ticker.
iex_fundamentals: pd.DataFrame
DataFrame of IEX Cloud's fundamentals data for ticker.
transformations : pd.DataFrame
            DataFrame of all data, including transformations.
company : pd.DataFrame
DataFrame of company data.
fundamentals : pd.DataFrame
DataFrame of fundamental data.
metrics : pd.DataFrame
DataFrame of company metrics data.
"""
self.ticker = ticker
self.token = token
self.version = version
self.iex_base_url = self.iex_base_url()
self.period = period
self.num_periods = num_periods
self.api_requests = self.api_requests()
self.iex_company = self.iex_company()
self.iex_fundamentals = self.iex_fundamentals()
self.transformations = self.transformations()
self.company = self.company()
self.fundamentals = self.fundamentals()
self.metrics = self.metrics()
def iex_base_url(self):
"""Returns the base url for the API endpoint.
Returns
-------
str
IEX Cloud API base url.
"""
base_url = "https://cloud.iexapis.com/"
test_url = "https://sandbox.iexapis.com/stable"
if self.version == "stable":
url = base_url + "stable"
elif self.version == "test":
url = test_url
return url
def api_requests(self):
"""Concatenate API endpoint urls from Class Attributes
Raises
-------
JSONDecodeError
There was an issue with the way JSON data was formatted. Returns None.
Returns
-------
tuple(str, str)
Tuple of API endpoints for iex_company and iex_fundamentals.
"""
session = FuturesSession(executor=ThreadPoolExecutor(max_workers=10))
        try:
company_url = (
session.get(
f"{self.iex_base_url}/stock/{self.ticker}/company?token={self.token}"
)
.result()
.json()
)
fundamentals_url = (
session.get(
f"{self.iex_base_url}/time-series/fundamentals/{self.ticker}/{self.period}?last={self.num_periods}&token={self.token}"
)
.result()
.json()
)
return company_url, fundamentals_url
except json.JSONDecodeError:
print(
f"Decoding JSON for ticker {self.ticker} has failed. Canceling request for {self.ticker}"
)
def iex_company(self):
"""Creates and filters DataFrame with IEX company data.
Returns
-------
pd.DataFrame
DataFrame of IEX Cloud's company data for ticker.
"""
df = pd.json_normalize(self.api_requests[0])
df["lastUpdated"] = date.today()
df = df[
[
"symbol",
"companyName",
"country",
"exchange",
"industry",
"sector",
"lastUpdated",
]
]
df["industry"] = df["industry"].str.replace(",", ".")
return df
def iex_fundamentals(self):
"""Creates and filters DataFrame with IEX fundamentals data.
Returns
-------
pd.DataFrame
DataFrame of IEX Cloud's fundamentals data for ticker.
"""
df = pd.json_normalize(self.api_requests[1])
if self.period == "annual":
df["filingYear"] = df["filingDate"].str.split("-").str[0]
df["filingQuarter"] = np.nan
df["periodType"] = "annual"
df["periodKey"] = (f"{self.ticker}" + "a" + df["filingYear"]).str.lower()
else:
df["filingYear"] = df["filingDate"].str.split("-").str[0]
df["filingQuarter"] = df["fiscalQuarter"].astype(str)
df["periodType"] = "quarterly"
df["periodKey"] = (
f"{self.ticker}" + "q" + df["filingYear"] + df["filingQuarter"]
).str.lower()
        def neg_to_positive(frame):
            # Take the absolute value of any negative entries in numeric columns.
            for col in frame.select_dtypes(include=[np.number]).columns:
                frame[col] = frame[col].apply(lambda x: abs(x) if x < 0 else x)
            return frame
        df = neg_to_positive(df)
df["lastUpdated"] = date.today()
return df
def transformations(self):
"""Calculates and transforms the ticker's fundamentals data to calculate metrics.
Returns
-------
pd.DataFrame
            DataFrame of all data, including transformations.
"""
# loading data
df = self.iex_fundamentals
# get the tax rate, make adjustments, change in NWC, and define NOPAT & free cash flow
df["nopat"] = (df["incomeOperating"] * (1 - df["incomeTaxRate"])).astype(int)
# LIFO to FIFO adjustment for COGS and Inventory
def lifo_adjustment(x):
x["lifoDelta"] = (
(x["reservesLifo"] - x["reservesLifo"].shift(periods=-1)).fillna(0)
).astype(int)
x1 = list(x["reservesLifo"])
x2 = list(x["lifoDelta"])
x3 = list(x["inventory"])
x4 = list(x["salesCost"])
def myfunc(x1, x2):
result = [v1 + v2 if v1 > 0 else v2 for v1, v2 in zip(x1, x2)]
return result
def myfunc2(x1, x2):
result = [v2 - v1 if v1 != 0 else v2 for v1, v2 in zip(x1, x2)]
return result
ai = myfunc(x1, x3)
ac = myfunc2(x2, x4)
return ai, ac
ai, ac = lifo_adjustment(df)
df["adjInventory"] = ai
df["adjInventory"] = df["adjInventory"].astype(int)
df["adjCOGS"] = ac
df["adjCOGS"] = df["adjCOGS"].astype(int)
df["nonCashWorkingCapital"] = (
(df["accountsReceivable"] + df["adjInventory"])
- (df["accountsPayable"] + df["nibclRevenueDeferred"])
).astype(int)
df["nonCashWorkingCapitalDelta"] = (
(
df["nonCashWorkingCapital"]
- df["nonCashWorkingCapital"].shift(periods=-1)
).fillna(0)
).astype(int)
# Free cash flow
df["fcf"] = (
(
df["nopat"]
+ df["expensesDepreciationAndAmortization"]
- df["nonCashWorkingCapitalDelta"]
- df["capex"]
)
).astype(int)
# margin ratios
df["grossMargin"] = (df["profitGross"] / df["revenue"]) * 100
df["operatingMargin"] = (df["incomeOperating"] / df["revenue"]) * 100
df["ebitdaMargin"] = (df["ebitdaReported"] / df["revenue"]) * 100
df["ebitMargin"] = (df["ebitReported"] / df["revenue"]) * 100
df["fcfMargin"] = (df["fcf"] / df["revenue"].values) * 100
df["niMargin"] = (df["incomeNet"] / df["revenue"]) * 100
# expense metrics
df["sgaToRev"] = (df["expensesSga"] / df["revenue"]) * 100
df["rndToRev"] = (df["researchAndDevelopmentExpense"] / df["revenue"]) * 100
df["sbcToRev"] = (df["expensesStockCompensation"] / df["revenue"]) * 100
df["capexToRev"] = (df["capex"] / df["revenue"]) * 100
df["capexToFcf"] = (df["capex"] / df["fcf"]) * 100
df["acquisitionCapexToRev"] = (df["capexAcquisition"] / df["revenue"]) * 100
df["maintenanceCapexToRev"] = (df["capexMaintenance"] / df["revenue"]) * 100
df["acquisitionCapexToFcf"] = (df["capexAcquisition"] / df["fcf"]) * 100
df["maintenanceCapexToFcf"] = (df["capexMaintenance"] / df["fcf"]) * 100
# return ratios
df["ROA"] = (df["incomeNet"] / df["assetsUnadjusted"]) * 100
# define average shareholder's equity
df["avgShareholdersEquity"] = (
(
df["equityShareholder"].shift(periods=-1) + df["equityShareholder"]
).fillna(0)
        ) / 2
df["ROE"] = (df["incomeNet"] / df["avgShareholdersEquity"]) * 100
# define invested capital, ROIC, ROIC, CFROIC, CFROIIC
df["investedCapital"] = (
(
df["nonCashWorkingCapital"]
+ df["ppAndENet"]
+ df["assetsFixedDeferredCompensation"]
+ df["assetsFixedDeferredTax"]
+ df["assetsFixedLeasesOperating"]
+ df["assetsFixedOperatingSubsidiaryUnconsolidated"]
+ df["assetsFixedOther"]
+ df["goodwillAndIntangiblesNetOther"]
+ df["liabilitiesNonCurrentLeasesOperating"]
+ df["nibclLeasesOperating"]
)
).astype(int)
df["cashInvestedCapital"] = (
(df["investedCapital"] + df["depreciationAndAmortizationAccumulated"])
).astype(int)
df["ROIC"] = (df["nopat"] / df["investedCapital"].shift(periods=-1)) * 100
df["ROIIC"] = (
(df["nopat"] - df["nopat"].shift(periods=-1))
/ (
df["investedCapital"].shift(periods=-1)
- df["investedCapital"].shift(periods=-2)
)
) * 100
df["CROIC"] = (df["fcf"] / df["cashInvestedCapital"]) * 100
df["CROIIC"] = (
(df["fcf"] - df["nopat"].shift(periods=-1))
/ (
df["cashInvestedCapital"].shift(periods=-1)
- df["cashInvestedCapital"].shift(periods=-2)
)
) * 100
# activity ratios
df["avgTotalAssets"] = (
(
(
df["assetsUnadjusted"].shift(periods=-1) + df["assetsUnadjusted"]
).fillna(0)
)
/ 2
).astype(int)
df["avgInventory"] = (
((df["adjInventory"].shift(periods=-1) + df["adjInventory"]).fillna(0)) / 2
).astype(int)
df["avgAR"] = (
(
(
df["accountsReceivable"].shift(periods=-1)
+ df["accountsReceivable"]
).fillna(0)
)
/ 2
).astype(int)
df["avgAP"] = (
(
(
df["accountsPayable"].shift(periods=-1) + df["accountsPayable"]
).fillna(0)
)
/ 2
).astype(int)
df["assetTurnover"] = df["revenue"] / df["avgTotalAssets"]
df["inventoryTurnover"] = df["salesCost"] / df["avgInventory"]
df["receivablesTurnover"] = df["revenue"] / df["avgAR"]
df["payablesTurnover"] = df["salesCost"] / df["avgAP"]
# -----------------------------------------------
df["DSO"] = (1 / df["receivablesTurnover"]) * 365
df["DIO"] = (1 / df["inventoryTurnover"]) * 365
df["DPO"] = (1 / df["payablesTurnover"]) * 365
df["CCC"] = df["DSO"] + df["DIO"] - df["DPO"]
# growth
df["revenueGrowth"] = (
(df["revenue"] / df["revenue"].shift(periods=-1)) - 1
).fillna(0) * 100
df["fcfGrowth"] = ((df["fcf"] / df["fcf"].shift(periods=-1)) - 1).fillna(
0
) * 100
df["ebitdaGrowth"] = (
(df["ebitdaReported"] / df["ebitdaReported"].shift(periods=-1)) - 1
).fillna(0) * 100
df["ebitGrowth"] = (
(df["ebitReported"] / df["ebitReported"].shift(periods=-1)) - 1
).fillna(0) * 100
# function to calculate CAGRs
def cagrs(frame, datapoint, n):
x = frame[datapoint].tolist()
col = [
round(np.real(((((x[i] / x[n + i]) ** (1 / n)) - 1) * 100)), 2)
if x[n + i] != 0
else 0
for i, val in enumerate(x)
if i < len(x) - n
]
result = pd.Series(col)
return result
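        # Worked example (illustrative): if revenue moves from 100 to 150 over n=3
        # periods, the series is ordered newest-first, so the CAGR is
        # ((150 / 100) ** (1 / 3) - 1) * 100, roughly 14.47% per period; start
        # values of 0 are reported as 0 to avoid division by zero.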
df["3yearRevenueCAGR"] = cagrs(df, "revenue", 3)
df["5yearRevenueCAGR"] = cagrs(df, "revenue", 5)
df["10yearRevenueCAGR"] = cagrs(df, "revenue", 10)
df["3yearFcfCAGR"] = cagrs(df, "fcf", 3)
df["5yearFcfCAGR"] = cagrs(df, "fcf", 5)
df["10yearFcfCAGR"] = cagrs(df, "fcf", 10)
df["3yearEbitdaCAGR"] = cagrs(df, "ebitdaReported", 3)
df["5yearEbitdaCAGR"] = cagrs(df, "ebitdaReported", 5)
df["10yearEbitdaCAGR"] = cagrs(df, "ebitdaReported", 10)
# Rounding all floats to one decimal place
df = df.round(2)
return df
def company(self):
"""Company data
Returns
-------
pd.DataFrame
DataFrame of company data.
"""
df = self.iex_company
return df
def fundamentals(self):
"""Sorted fundamentals data
Returns
-------
pd.DataFrame
DataFrame of fundamentals data.
"""
df = self.transformations
df = df[
[
"periodKey",
"periodType",
"filingDate",
"filingYear",
"filingQuarter",
"fiscalYear",
"fiscalQuarter",
"dataGenerationDate",
"periodEndDate",
"filingType",
"accountsPayable",
"accountsReceivable",
"assetsCurrentCash",
"assetsCurrentCashRestricted",
"assetsCurrentDeferredCompensation",
"assetsCurrentDeferredTax",
"assetsCurrentDiscontinuedOperations",
"assetsCurrentInvestments",
"assetsCurrentLeasesOperating",
"assetsCurrentLoansNet",
"assetsCurrentOther",
"assetsCurrentSeparateAccounts",
"assetsCurrentUnadjusted",
"assetsFixed",
"assetsFixedDeferredCompensation",
"assetsFixedDeferredTax",
"assetsFixedDiscontinuedOperations",
"assetsFixedLeasesOperating",
"assetsFixedOperatingDiscontinuedOperations",
"assetsFixedOperatingSubsidiaryUnconsolidated",
"assetsFixedOreo",
"assetsFixedOther",
"assetsFixedUnconsolidated",
"assetsUnadjusted",
"capex",
"capexAcquisition",
"capexMaintenance",
"cashFlowFinancing",
"cashFlowInvesting",
"cashFlowOperating",
"cashFlowShareRepurchase",
"cashLongTerm",
"cashOperating",
"cashPaidForIncomeTaxes",
"cashPaidForInterest",
"cashRestricted",
"chargeAfterTax",
"chargeAfterTaxDiscontinuedOperations",
"chargesAfterTaxOther",
"creditLossProvision",
"debtFinancial",
"debtShortTerm",
"depreciationAndAmortizationAccumulated",
"depreciationAndAmortizationCashFlow",
"dividendsPreferred",
"dividendsPreferredRedeemableMandatorily",
"earningsRetained",
"ebitReported",
"ebitdaReported",
"equityShareholder",
"equityShareholderOther",
"equityShareholderOtherDeferredCompensation",
"equityShareholderOtherEquity",
"equityShareholderOtherMezzanine",
"expenses",
"expensesAcquisitionMerger",
"expensesCompensation",
"expensesDepreciationAndAmortization",
"expensesDerivative",
"expensesDiscontinuedOperations",
"expensesDiscontinuedOperationsReits",
"expensesEnergy",
"expensesForeignCurrency",
"expensesInterest",
"expensesInterestFinancials",
"expensesInterestMinority",
"expensesLegalRegulatoryInsurance",
"expensesNonOperatingCompanyDefinedOther",
"expensesNonOperatingOther",
"expensesNonOperatingSubsidiaryUnconsolidated",
"expensesNonRecurringOther",
"expensesOperating",
"expensesOperatingOther",
"expensesOperatingSubsidiaryUnconsolidated",
"expensesOreo",
"expensesOreoReits",
"expensesOtherFinancing",
"expensesRestructuring",
"expensesSga",
"expensesStockCompensation",
"expensesWriteDown",
"ffo",
"goodwillAmortizationCashFlow",
"goodwillAmortizationIncomeStatement",
"goodwillAndIntangiblesNetOther",
"goodwillNet",
"incomeFromOperations",
"incomeNet",
"incomeNetPreTax",
"incomeOperating",
"incomeOperatingDiscontinuedOperations",
"incomeOperatingOther",
"incomeOperatingSubsidiaryUnconsolidated",
"incomeOperatingSubsidiaryUnconsolidatedAfterTax",
"incomeTax",
"incomeTaxCurrent",
"incomeTaxDeferred",
"incomeTaxRate",
"interestMinority",
"inventory",
"liabilities",
"liabilitiesCurrent",
"liabilitiesNonCurrentAndInterestMinorityTotal",
"liabilitiesNonCurrentDebt",
"liabilitiesNonCurrentDeferredCompensation",
"liabilitiesNonCurrentDeferredTax",
"liabilitiesNonCurrentDiscontinuedOperations",
"liabilitiesNonCurrentLeasesOperating",
"liabilitiesNonCurrentLongTerm",
"liabilitiesNonCurrentOperatingDiscontinuedOperations",
"liabilitiesNonCurrentOther",
"nibclDeferredCompensation",
"nibclDeferredTax",
"nibclDiscontinuedOperations",
"nibclLeasesOperating",
"nibclOther",
"nibclRestructuring",
"nibclRevenueDeferred",
"nibclSeparateAccounts",
"oci",
"ppAndENet",
"profitGross",
"researchAndDevelopmentExpense",
"reserves",
"reservesInventory",
"reservesLifo",
"reservesLoanLoss",
"revenue",
"revenueCostOther",
"revenueIncomeInterest",
"revenueOther",
"revenueSubsidiaryUnconsolidated",
"salesCost",
"sharesIssued",
"sharesOutstandingPeDateBs",
"sharesTreasury",
"stockCommon",
"stockPreferred",
"stockPreferredEquity",
"stockPreferredMezzanine",
"stockTreasury",
"wabso",
"wabsoSplitAdjusted",
"wadso",
"wadsoSplitAdjusted",
"nopat",
"lifoDelta",
"adjInventory",
"adjCOGS",
"nonCashWorkingCapital",
"nonCashWorkingCapitalDelta",
"fcf",
"investedCapital",
"cashInvestedCapital",
"avgTotalAssets",
"avgInventory",
"avgAR",
"avgAP",
]
]
return df
def metrics(self):
"""Sorted metrics data
Returns
-------
pd.DataFrame
DataFrame of metrics data.
"""
df = self.transformations
df = df[
[
"periodKey",
"periodType",
"filingDate",
"filingYear",
"filingQuarter",
"fiscalYear",
"fiscalQuarter",
"grossMargin",
"operatingMargin",
"ebitdaMargin",
"ebitMargin",
"fcfMargin",
"niMargin",
"revenueGrowth",
"fcfGrowth",
"ebitdaGrowth",
"ebitGrowth",
"3yearRevenueCAGR",
"5yearRevenueCAGR",
"10yearRevenueCAGR",
"3yearFcfCAGR",
"5yearFcfCAGR",
"10yearFcfCAGR",
"3yearEbitdaCAGR",
"5yearEbitdaCAGR",
"10yearEbitdaCAGR",
"sgaToRev",
"rndToRev",
"sbcToRev",
"capexToRev",
"capexToFcf",
"acquisitionCapexToRev",
"maintenanceCapexToRev",
"acquisitionCapexToFcf",
"maintenanceCapexToFcf",
"ROA",
"ROE",
"ROIC",
"ROIIC",
"CROIC",
"CROIIC",
"assetTurnover",
"receivablesTurnover",
"payablesTurnover",
"DSO",
"DIO",
"DPO",
"CCC",
]
]
return df
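# Minimal usage sketch (illustrative): the token below is a placeholder and a live
# call needs network access to IEX Cloud, so this only runs when executed directly.
if __name__ == "__main__":
    # "test" points requests at the IEX sandbox endpoint built in iex_base_url().
    msft = Stock(
        token="Tsk_sandbox_token_here",  # hypothetical sandbox token, replace with your own
        version="test",
        ticker="MSFT",
        period="annual",
        num_periods=10,
    )
    print(msft.company.head())
    print(msft.metrics[["filingYear", "grossMargin", "fcfMargin", "ROIC"]].head())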
|
the-stack_0_21967 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import os
from typing import Any, List, Optional
import torch
import torch.distributed as torch_distrib
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from pytorch_lightning import _logger as log
from pytorch_lightning import LightningModule
from pytorch_lightning.plugins.legacy.rpc_plugin import RPCPlugin
from pytorch_lightning.utilities import _FAIRSCALE_PIPE_AVAILABLE, rank_zero_only
from pytorch_lightning.utilities.exceptions import MisconfigurationException
if _FAIRSCALE_PIPE_AVAILABLE:
import fairscale.nn.model_parallel as mpu
from fairscale.nn import PipeRPCWrapper
from fairscale.nn.pipe import balance as pipe_balance
from fairscale.nn.pipe import rpc as rpc_pipe
from fairscale.nn.pipe.pipeline import PipelineStyle
class DDPSequentialPlugin(RPCPlugin):
def __init__(
self,
balance: Optional[List[int]] = None,
microbatches: int = 8,
checkpoint: str = 'except_last',
balance_mode: str = "balance_by_size",
pipelined_backward: Optional[bool] = True,
**kwargs):
"""
Provides sequential model parallelism for :class:`nn.Sequential <torch.nn.Sequential>` module.
If the module requires lots of memory, Pipe can be used to reduce this by leveraging multiple GPUs.
Example::
class MyLightningModule:
def __init__(self):
...
model.sequential_module = torch.nn.Sequential(my_layers)
# Split my module across 4 gpus, one layer each
model = MyLightningModule()
plugin = DDPSequentialPlugin(balance=[1, 1, 1, 1])
trainer = Trainer(accelerator='ddp', gpus=4, plugins=[plugin])
trainer.fit(model)
.. _DDPSequentialPlugin: https://arxiv.org/abs/1811.06965
        Pipeline parallelism comes with checkpointing to reduce peak
memory required to train while minimizing device under-utilization.
This is turned on by default and can be turned off via the checkpoint argument.
You should determine the balance when defining the plugin,
or you can pass an example input array via the LightningModule to infer a balance.
The module will be partitioned into multiple devices according to the given balance. You may also rely on
your own heuristics to find your own optimal configuration.
Args:
balance: The balance of the model, i.e [2, 2] (two layers on each GPU).
If not provided assumes user provides an input example array to find a balance on all GPUs.
microbatches: Allows for parallelization to reduce device utilization
by splitting the batch into further smaller batches.
checkpoint: Enables gradient checkpointing. ['always', 'except_last', 'never']
balance_mode: Type of balance heuristic to use if balance to be inferred.
- 'balance_by_size': checks memory usage of each layer and determines balance
- 'balance_by_time': checks time of each layer and determines balance
pipelined_backward: if True, call torch.autograd.backward once per microbatch on the
backward pass (instead of once for the whole batch). This works
around a potential deadlock in pytorch when using tensor parallelism
at the same time. Defaults to `True` if
`get_model_parallel_world_size() > 1`
"""
self._check_pipe_available()
super().__init__(**kwargs)
self.balance = balance
self.microbatches = microbatches
self.checkpoint = checkpoint
self.balance_mode = balance_mode
self.pipelined_backward = pipelined_backward
self.main_rpc_process = False # Updated by main process, default for all secondary processes
def init_ddp_connection(
self,
trainer,
cluster_environment,
global_rank: int,
world_size: int,
is_slurm_managing_tasks: bool = True,
) -> None:
trainer.prepared_for_backwards = False
self._check_arguments(trainer)
if self._skip_init_connections(trainer):
return
super().init_ddp_connection(
trainer=trainer,
cluster_environment=cluster_environment,
global_rank=global_rank,
world_size=world_size,
is_slurm_managing_tasks=is_slurm_managing_tasks
)
super().init_rpc_connection(
global_rank=global_rank,
world_size=world_size
)
model = trainer.get_model()
self.gpus_per_model = self._infer_check_num_gpus(trainer)
self.init_model_parallel_groups(trainer)
self.set_main_rpc_process()
self._check_sequential_model_exists(model)
if self.main_rpc_process:
if self.balance is None:
self._infer_model_balance(trainer)
self._assert_valid_model_balance(trainer)
def on_before_manual_backward(self, model: DistributedDataParallel, output: Any):
pass
def _infer_model_balance(self, trainer):
log.info(f'Inferring model balance using {self.balance_mode} mode')
model = trainer.get_model()
if model.example_input_array is None:
raise MisconfigurationException(
'Please set example_input_array to your model, so we can infer the right model balance for you')
balance_func = getattr(pipe_balance, self.balance_mode)
self.balance = balance_func(self.gpus_per_model, model.sequential_module, model.example_input_array)
self._sync_balance_to_all_parallel_groups()
log.info(f'The following model balance {self.balance.tolist()} was inferred using {self.balance_mode} mode')
def _sync_balance_to_all_parallel_groups(self, main_rank=0):
"""
Ensures that we sync the balance to all main processes, so that the balance is the same per replica.
Args:
main_rank: The rank with the balance we'd like to replicate.
"""
self.balance = torch.tensor(self.balance, dtype=torch.int, device='cuda')
# Ensure we sync to all processes within the main data parallel group
# We use the data parallel group as all main processes are found within the same group
torch_distrib.broadcast(self.balance, src=main_rank, group=mpu.get_data_parallel_group())
self.balance = self.balance.cpu()
def _check_sequential_model_exists(self, model):
if not hasattr(model, "sequential_module") or not isinstance(model.sequential_module, nn.Sequential):
raise MisconfigurationException(
'Could not find a PipeLightningModule within the model. '
'Did you set your sequential model as the `sequential_module` attribute of your model?')
def _find_and_init_pipe_module(self, model):
if hasattr(model, "sequential_module") and isinstance(model.sequential_module, LightningPipeModule):
# model has been wrapped already
return
elif hasattr(model, "sequential_module") and isinstance(model.sequential_module, nn.Sequential):
# try to wrap model for the user
model.sequential_module = LightningPipeModule(
model.sequential_module,
balance=self.balance,
microbatches=self.microbatches,
checkpoint=self.checkpoint,
)
# Update references for workers to access correct lightning functions when calling RPC
model.sequential_module.trainer = model.trainer
model.sequential_module.configure_optimizers = model.configure_optimizers
# Update references for main process to access correct lightning functions when calling RPC
model.sequential_module.module.model.trainer = model.trainer
model.sequential_module.module.model.configure_optimizers = model.configure_optimizers
else:
raise MisconfigurationException(
'Could not find a PipeLightningModule within the model. '
                'Did you set your sequential model as the `sequential_module` attribute of your model?'
)
def _assert_valid_model_balance(self, trainer):
model = trainer.get_model()
if sum(self.balance) != len(model.sequential_module):
raise MisconfigurationException(
f'The provided balance sum: {sum(self.balance)} does not'
f' match your Sequential length: {len(model.sequential_module)}')
def _skip_init_connections(self, trainer):
"""
Skip initialization if torch is already initialized and we're in testing.
Returns:
Whether to skip initialization
"""
return torch_distrib.is_initialized() and trainer.testing
def init_model_parallel_groups(self, trainer):
num_model_parallel = 1 # TODO currently no support for vertical model parallel
mpu.initialize_model_parallel(
model_parallel_size_=num_model_parallel,
pipeline_length=self.gpus_per_model
)
def _infer_check_num_gpus(self, trainer):
"""
Infer the number of GPUs per model.
Args:
trainer: The trainer object.
Returns:
            The number of GPUs used per model.
"""
if isinstance(self.balance, list):
if len(self.balance) != (trainer.world_size / trainer.num_nodes):
raise MisconfigurationException(
"Pipe currently only supports splitting the module onto all available GPUs"
)
# User has defined a balance for his model
return len(self.balance)
# Assume that the user wants to balance his model on all GPUs
return trainer.world_size
def on_accelerator_exit_rpc_process(self, trainer) -> None:
if not trainer.testing:
torch_distrib.barrier() # Ensure we await main process initialization
# Add trainer/configure_optimizers to the pipe model for access in all worker processes
rpc_pipe.PipeModel.trainer = trainer
del rpc_pipe.PipeModel.trainer.model.sequential_module
rpc_pipe.PipeModel.trainer.model.sequential_module = rpc_pipe.PipeModel
rpc_pipe.PipeModel.configure_optimizers = trainer.model.configure_optimizers
super().on_accelerator_exit_rpc_process(trainer)
def set_main_rpc_process(self):
self.main_rpc_process = torch_distrib.get_rank(group=mpu.get_pipeline_parallel_group()) == 0
def on_main_rpc_connection(self, trainer) -> None:
# Create pipe_module
model = trainer.get_model()
self._find_and_init_pipe_module(model)
if not trainer.testing:
torch_distrib.barrier() # Ensure we join main process initialization
model.sequential_module.foreach_worker(register_optimizers, include_self=True)
def _check_arguments(self, trainer):
if trainer.amp_backend is not None:
raise MisconfigurationException(
'DDPSequentialPlugin is currently not supported in Automatic Mixed Precision')
def configure_ddp(
self,
model: LightningModule, device_ids: List[int]) -> DistributedDataParallel:
model = RPCPlugin(process_group=mpu.get_data_parallel_group()).configure_ddp(model, device_ids)
# Plugin handle backwards across processes. Currently not supported for DDP + pipe parallel
model.require_backward_grad_sync = False
return model
@rank_zero_only
def rpc_save_model(
self,
save_model_fn,
last_filepath,
trainer,
pl_module) -> None:
model = trainer.get_model()
if not hasattr(model.sequential_module, "foreach_worker"):
return
current_layers = pl_module.sequential_module
model.sequential_module.foreach_worker(
save_layers_on_all_rank_zero_workers,
{"gpus_per_model": self.gpus_per_model},
include_self=True
)
pl_module.sequential_module = load_sequential_from_saved_layers(self.gpus_per_model)
save_model_fn(last_filepath, trainer, pl_module)
pl_module.sequential_module = current_layers
def worker_optimizer_step(
self,
model: LightningModule,
opt_idx: int,
*args,
**kwargs) -> None:
model.sequential_module.foreach_worker(
run_optimizer,
{"opt_idx": opt_idx, "args": args, "kwargs": kwargs},
include_self=False
)
def distributed_sampler_kwargs(self, distributed_sampler_kwargs):
return dict(
num_replicas=mpu.get_data_parallel_world_size(),
rank=mpu.get_data_parallel_rank(),
)
@property
def data_parallel_group(self):
return mpu.get_data_parallel_group()
@property
def is_main_rpc_process(self) -> bool:
return self.main_rpc_process
@property
def return_after_exit_rpc_process(self) -> bool:
return True
def barrier(self, name: Optional[str] = None) -> None:
if torch_distrib.is_initialized() and self.is_main_rpc_process:
torch_distrib.barrier(group=self.data_parallel_group)
def _check_pipe_available(self):
if not _FAIRSCALE_PIPE_AVAILABLE:
raise MisconfigurationException(
'PipeRPCPlugin requires FairScale and currently is only supported on PyTorch 1.6.'
)
class LightningPipeModule(nn.Module):
"""
This class wraps Fairscale Pipe and PipeRCPWrapper class.
"""
def __init__(
self,
module: nn.Sequential,
balance: List[int],
microbatches: int = 8,
checkpoint='never'):
super().__init__()
self.module = module
self.balance = balance
self.microbatches = microbatches
self.checkpoint = checkpoint
self._init_pipe()
def _init_pipe(self):
device = torch.device("cuda", torch_distrib.get_rank())
self.module = PipeRPCWrapper(
module=self.module,
balance=self.balance,
chunks=self.microbatches,
style=PipelineStyle.MultiProcess,
input_device=device,
worker_map=self.get_worker_map(),
checkpoint=self.checkpoint,
)
def foreach_worker(self, *args, **kwargs):
self.module.foreach_worker(*args, **kwargs)
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
def get_worker_map(self):
# TODO, is this correct with multinodes? We also assume "worker" is the same as defined in the RPCPlugin
return {rank: f"worker{rank}" for rank in range(torch_distrib.get_world_size())}
def register_optimizers(ctx, model):
optimizers, lr_schedulers, optimizer_frequencies = model.trainer.init_optimizers(model)
model.trainer.optimizers = optimizers
model.trainer.lr_schedulers = lr_schedulers
model.trainer.optimizer_frequencies = optimizer_frequencies
def run_optimizer(ctx, model):
trainer = model.trainer
opt_idx = ctx["opt_idx"]
optimizer = trainer.optimizers[opt_idx]
optimizer.step(*ctx["args"], **ctx["kwargs"])
def save_layers_on_all_rank_zero_workers(ctx, model):
gpus_per_model = ctx["gpus_per_model"]
rank = torch_distrib.get_rank()
if rank in range(gpus_per_model):
seq = list(model.children())[0]
torch.save(seq, f"seq_{rank}.pt")
def load_sequential_from_saved_layers(gpus_per_model):
partial_seqs = [torch.load(f"seq_{rank}.pt", map_location='cpu') for rank in range(gpus_per_model)]
seq = nn.Sequential()
for p_seq in partial_seqs:
for name, child in p_seq.named_children():
seq.add_module(name, child)
# delete tmp files
[os.remove(f"seq_{rank}.pt") for rank in range(gpus_per_model)]
return seq
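# Illustrative note (not part of the plugin API): save_layers_on_all_rank_zero_workers
# and load_sequential_from_saved_layers form a round-trip. Each pipeline-stage rank in
# range(gpus_per_model) dumps its partition to seq_<rank>.pt, and the full nn.Sequential
# is rebuilt on the main process with, e.g.:
#
#     full_sequential = load_sequential_from_saved_layers(gpus_per_model=2)
#
# rpc_save_model above performs exactly this round-trip before writing a checkpoint.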
|
the-stack_0_21968 | import math
import numpy as np
from scipy.constants import g
class Depth:
def __init__(self, depth=0.0, density=1025):
self._depth = depth
self._pressure = 0.0
self._density = density
@property
def depth(self):
return self._depth
@property
def pressure(self):
return self.calculate_pressure(self._depth, self._density)
@staticmethod
def calculate_pressure(depth, density):
'''
Calculate the static pressure of water column by depth and water density.
P = r * g * h
(adapted from https://www.grc.nasa.gov/WWW/k-12/WindTunnel/Activities/fluid_pressure.html)
where:
P: is pressure (Pa)
r(rho): is density of fluid
g: acceleration of gravity (constant)
h: height of fluid above object
Args:
depth: Depth of object (in meters)
density: Density of water column (kg/m^3)
Returns:
pressure: Pressure on object (kPa)
Raises:
            ValueError: Non-numeric entry for depth or density
'''
if not isinstance(depth, (int, float)):
raise ValueError("Depth must be a numeric value")
if not isinstance(density, (int, float)):
raise ValueError("Density must be a numeric value")
return density * depth * g / 1000
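    # Worked check (illustrative): at depth=100 m in seawater (density=1025 kg/m^3),
    # calculate_pressure returns 1025 * 100 * 9.80665 / 1000, roughly 1005.2 kPa, and
    # calculate_depth(1005.2, 1025) recovers a depth of roughly 100 m.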
@staticmethod
def calculate_depth(pressure, density):
'''
Calculate the depth of object based on density and pressure.
h = P / (r * g)
(adapted from https://www.grc.nasa.gov/WWW/k-12/WindTunnel/Activities/fluid_pressure.html)
where:
P: is pressure (Pa)
r(rho): is density of fluid
g: acceleration of gravity (constant)
h: height of fluid above object
Args:
            pressure: Pressure on object (in kPa)
density: Density of water column (kg/m^3)
Returns:
            depth: Depth of object (m)
Raises:
            ValueError: Non-numeric entry for pressure or density
'''
if not isinstance(pressure, (int, float)):
raise ValueError("Pressure must be a numeric value")
if not isinstance(density, (int, float)):
raise ValueError("Density must be a numeric value")
return (pressure * 1000.0) / (density * g) |
the-stack_0_21969 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet backup features.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from decimal import Decimal
import os
from random import randint
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
from test_framework.creditconfig import COINBASE_MATURITY, INITIAL_BLOCK_REWARD
from test_framework.credit import generatesynchronized
class WalletBackupTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
# nodes 1, 2,3 are spenders, let's give them a keypool=100
# whitelist all peers to speed up tx relay / mempool sync
self.extra_args = [
["[email protected]", "-keypool=100"],
["[email protected]", "-keypool=100"],
["[email protected]", "-keypool=100"],
["[email protected]"],
]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
self.sync_mempools()
self.nodes[3].generate(1)
self.sync_blocks()
# As above, this mirrors the original bash test.
def start_three(self):
self.start_node(0)
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
self.stop_node(0)
self.stop_node(1)
self.stop_node(2)
def erase_three(self):
os.remove(os.path.join(self.nodes[0].datadir, self.chain, 'wallets', 'wallet.dat'))
os.remove(os.path.join(self.nodes[1].datadir, self.chain, 'wallets', 'wallet.dat'))
os.remove(os.path.join(self.nodes[2].datadir, self.chain, 'wallets', 'wallet.dat'))
def run_test(self):
self.log.info("Generating initial blockchain")
self.nodes[0].generate(1)
self.sync_blocks()
self.nodes[1].generate(1)
self.sync_blocks()
self.nodes[2].generate(1)
self.sync_blocks()
generatesynchronized(self.nodes[3], COINBASE_MATURITY, None, self.nodes)
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), INITIAL_BLOCK_REWARD)
assert_equal(self.nodes[1].getbalance(), INITIAL_BLOCK_REWARD)
assert_equal(self.nodes[2].getbalance(), INITIAL_BLOCK_REWARD)
assert_equal(self.nodes[3].getbalance(), 0)
self.log.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
self.log.info("Backing up")
self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
self.nodes[0].dumpwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, 'wallet.bak'))
self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].backupwallet(os.path.join(self.nodes[2].datadir, 'wallet.bak'))
self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.log.info("More transactions")
for i in range(5):
self.do_one_round()
        # Generate COINBASE_MATURITY + 1 more blocks, so any fees paid mature
generatesynchronized(self.nodes[3], COINBASE_MATURITY + 1, None, self.nodes)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
        # At this point the chain has 2 * COINBASE_MATURITY + 14 blocks; all but the
        # last COINBASE_MATURITY are mature, so the sum of all wallets should be
        # (COINBASE_MATURITY + 14) * INITIAL_BLOCK_REWARD.
assert_equal(total, (COINBASE_MATURITY+14)*INITIAL_BLOCK_REWARD)
##
# Test restoring spender wallets from backups
##
self.log.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
# Restore wallets from backup
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, self.chain, 'wallets', 'wallet.dat'))
shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, self.chain, 'wallets', 'wallet.dat'))
shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join(self.nodes[2].datadir, self.chain, 'wallets', 'wallet.dat'))
self.log.info("Re-starting nodes")
self.start_three()
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
self.log.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
        # Start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].importwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].importwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
# Backup to source wallet file must fail
sourcePaths = [
os.path.join(self.nodes[0].datadir, self.chain, 'wallets', 'wallet.dat'),
os.path.join(self.nodes[0].datadir, self.chain, '.', 'wallets', 'wallet.dat'),
os.path.join(self.nodes[0].datadir, self.chain, 'wallets', ''),
os.path.join(self.nodes[0].datadir, self.chain, 'wallets')]
for sourcePath in sourcePaths:
assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
if __name__ == '__main__':
WalletBackupTest().main()
|
the-stack_0_21973 | # config/config.py
# Configurations.
import logging
import sys
from pathlib import Path
PATH_DATA_RAW = Path("data/raw/")
PATH_DATA_PROCESSED = Path("data/processed/")
MODELS_PATH = Path("models/")
LOGS_DIR = Path("logs/")
KAFKA_HOST = "localhost:9092"
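# The rotating file handlers configured below require LOGS_DIR to exist before
# dictConfig() instantiates them. Creating the directories here is an assumption
# about how the project bootstraps itself, not something this config mandates.
for _dir in (PATH_DATA_RAW, PATH_DATA_PROCESSED, MODELS_PATH, LOGS_DIR):
    _dir.mkdir(parents=True, exist_ok=True)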
# Logger
logging_config = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"minimal": {"format": "%(message)s"},
"detailed": {
"format": "%(levelname)s %(asctime)s [%(filename)s:%(funcName)s:%(lineno)d]\n%(message)s\n"
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"stream": sys.stdout,
"formatter": "minimal",
"level": logging.DEBUG,
},
"info": {
"class": "logging.handlers.RotatingFileHandler",
"filename": Path(LOGS_DIR, "info.log"),
"maxBytes": 10485760, # 1 MB
"backupCount": 10,
"formatter": "detailed",
"level": logging.INFO,
},
"error": {
"class": "logging.handlers.RotatingFileHandler",
"filename": Path(LOGS_DIR, "error.log"),
"maxBytes": 10485760, # 1 MB
"backupCount": 10,
"formatter": "detailed",
"level": logging.ERROR,
},
},
"loggers": {
"root": {
"handlers": ["console", "info", "error"],
"level": logging.INFO,
"propagate": True,
},
},
}
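# A minimal sketch of consuming this config; whether the project applies it here
# or in an application entry point is an assumption. Note that dictConfig lives
# in the logging.config submodule, which "import logging" alone does not load,
# and that the file handlers need LOGS_DIR to exist when the config is applied.
import logging.config
logging.config.dictConfig(logging_config)
logger = logging.getLogger("root")
logger.info("Logging configured.")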
|