seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt | api
---|---|---|---|---|---|---|---|---|---|---|---|---|---
string | string | string | string | string | string | int64 | string | string | string | int64 | string | string | list
8012099265 |
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("file", type=str, help="data file")
parser.add_argument("-R", "--rd", type=float, default=1e3, help="resistor on drain")
parser.add_argument("-D", "--diagnose", action="store_true", help="plot aux polynomial")
parser.add_argument("-I", "--init", type=float, default=10, help="iterative seed")
parser.add_argument("-G", "--gain", type=float, default=25, help="gain desired (dB)")
parser.add_argument("-P", "--plot", action="store_true", help="plot fitted polynomial")
args = parser.parse_args()
try:
data = np.genfromtxt(args.file)
except OSError:
print("File {0:s} does not exist".format(args.file))
exit()
vgs_data, id_data = data[:, 0], data[:, 1]
if args.diagnose:
vx = np.linspace(0, 10, 1000)
vy1 = [((vgs_data - v) @ id_data -
np.square(vgs_data - v) @ id_data * np.sum(np.power(vgs_data - v, 3)) /
np.sum(np.power(vgs_data - v, 4)))
for v in vx]
#vy2 = [((vgs_data @ id_data -
# np.square(vgs_data - v) @ id_data * np.sum(np.power(vgs_data - v, 3)) /
# np.sum(np.power(vgs_data - v, 4))) / np.sum(id_data))
# for v in vx]
#vy3 = [(vgs_data @ id_data - np.sum(id_data) * v +
# np.square(vgs_data - v) @ id_data * np.sum(np.power(vgs_data - v, 3)) /
# np.sum(np.power(vgs_data - v, 4)))]
plt.plot(vx, vy1, color="r")
#plt.plot(vx, vy2, color="g")
#plt.plot(vx, vy3, color="b")
plt.xlabel(r"$v_t$")
plt.ylabel(r"$p(v_t$)")
plt.grid()
plt.show()
exit()
# Bisection
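# Least-squares fit of id = k*(vgs - vt)^2: zeroing the error derivatives
# with respect to k and vt yields two estimates of k that agree only at the
# true vt. The quantity d below is their scaled difference, so bisection on
# the sign of d locates the fitted threshold voltage.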
va, vt = 0, 0
vb = args.init
while not np.isclose(va, vb, rtol=10 * np.finfo(float).eps, atol=0.0):
vt = (va + vb) / 2
d = ((vgs_data - vt) @ id_data -
(np.square(vgs_data - vt) @ id_data / np.sum(np.power(vgs_data - vt, 4))) *
np.sum(np.power(vgs_data - vt, 3)))
if d > 0:
vb = vt
elif d < 0:
va = vt
else:
va = vb
k = id_data @ np.square(vgs_data - vt) / np.sum(np.power(vgs_data - vt, 4))
k2 = id_data @ (vgs_data - vt) / np.sum(np.power(vgs_data - vt, 3))
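# k and k2 are the two normal-equation estimates of k; they coincide at a
# genuine root of the auxiliary polynomial, so a mismatch means the bisection
# converged to a spurious root.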
if not np.isclose(k, k2, rtol=10 * np.finfo(float).eps, atol=0.0):
print("fit invalid, try a different seed estimated from aux polynomial")
exit()
gain_target = 10 ** (args.gain / 20)
vgsq = gain_target / (2 * k * args.rd) + vt
idq = k * (vgsq - vt) ** 2
print("k = {:3.3f} mA/V^2".format(k * 1000))
print("Vt = {:2.3f} V\n".format(vt))
print("Gain = {} dB".format(args.gain))
print("Rd = {} ohm".format(args.rd))
print("Vgsq = {:3.3} V".format(vgsq))
print("Idq = {:2.4} mA".format(1000 * idq))
if args.plot:
    vgs = np.linspace(vgs_data[0], vgs_data[-1], 1000)
i_d = k * np.square(vgs - vt)
plt.title(r"$K = {0:3.3f} mA/V^2, V_t = {1:2.3f}V$".format(k * 1000, vt))
plt.xlabel(r"$v_{gs}$")
plt.ylabel(r"$i_d$")
plt.grid()
plt.plot(vgs_data, id_data, marker="o", linestyle=" ")
plt.plot(vgs, i_d, linestyle="-")
plt.show()
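# Hypothetical invocation (file name and values are illustrative); the data
# file holds two whitespace-separated columns, vgs and id:
#   ./nmosfit.py mosfet.dat --rd 1000 --gain 25 --plot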
| mvallina/trts | nmosfit.py | nmosfit.py | py | 3,076 | python | en | code | 1 | github-code | 6 |
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "numpy.isclose",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.finfo",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.finfo",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 91,
"usage_type": "name"
}
] |
75269781308 |
from sklearn.svm import SVC
import common as c
def run(num_folds, seed, test_size, dataset_url, delimiter, _c, gamma, kernel):
try:
model = SVC(C=_c, gamma=gamma, kernel=kernel)
c.run_with_classification_model(model, num_folds, seed, test_size, dataset_url, delimiter)
except Exception as e:
raise e
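# Hypothetical usage (all argument values are illustrative):
# run(num_folds=5, seed=42, test_size=0.2, dataset_url='data/iris.csv',
#     delimiter=',', _c=1.0, gamma='scale', kernel='rbf')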
| ingridcoda/serverless-machine-learning | algorithms/classification/svm.py | svm.py | py | 335 | python | en | code | 6 | github-code | 6 |
[
{
"api_name": "sklearn.svm.SVC",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "common.run_with_classification_model",
"line_number": 9,
"usage_type": "call"
}
] |
86667723602 |
import random,os,string,json,requests
from flask_mail import Message
from flask import render_template,url_for,session,request,flash,abort
from werkzeug.utils import redirect
from werkzeug.security import generate_password_hash,check_password_hash
from projectapp import app,db
from projectapp.mymodel import Guest, Lga, State, Gift, Transaction, guest_gift,Document,Questions
from projectapp import mail
@app.route('/', methods=['GET','POST'])
def home():
if request.method=='GET':
try:
response = requests.get('http://127.0.0.1:8030/hostel/api/v1.0/listall/')
hostels = json.loads(response.text)
except requests.exceptions.ConnectionError as e:
hostels=[]
mystates = db.session.query(State).all()
return render_template('user/index.html',mystates=mystates, hostels=hostels)
else:
#retrieve form data
fname = request.form.get('fname')
lname = request.form.get('lname')
state = request.form.get('state')
email = request.form.get('email')
password = request.form.get('password')
#save into database
converted = generate_password_hash(password)
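        # generate_password_hash stores a salted hash instead of the plain
        # password; check_password_hash in the login view verifies it later.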
g = Guest(guest_fname=fname,guest_lname=lname,state_id=state,guest_email=email,guest_pwd=converted)
db.session.add(g)
db.session.commit()
#keep details in session
session['user'] = g.id
#save feedback in a flash
flash('Form has been successfully submitted')
#redirect to user/profile
        return redirect('/user/profile')
@app.route('/user/profile')
def profile():
loggedin_user = session.get('user')
if loggedin_user != None:
data = db.session.query(Guest).get(loggedin_user)
iv = db.session.query(Document).get(1)
return render_template('/user/profile.html',data=data,iv=iv)
else:
return redirect(url_for('login'))
@app.route('/logout')
def logout():
session.pop('user', None)
return redirect('/')
@app.route('/user/login', methods=['GET','POST'])
def login():
if request.method =='GET':
#1 display a template with login form
return render_template('user/login.html')
else:
#2 retrieve form data
username = request.form.get('username')
pwd = request.form.get('pwd')
        #3 write a query to fetch from the guest table where username ='' and password =''
deets = db.session.query(Guest).filter(Guest.guest_email==username).first()
#4 if data was fetched, keep the id in session and redirect to profile page
if deets:
loggedin_user = deets.id
            hashedpass = deets.guest_pwd
check = check_password_hash(hashedpass,pwd)
if check:
session['user'] = loggedin_user
return redirect('/user/profile')
else:
flash('invalid username or password')
return redirect(url_for('login'))
else:
#5 if data was empty, keep feedback in a flash and redirect to homepage/login page
flash('invalid username or password')
return redirect(url_for('login'))
@app.route('/user/gift',methods=['GET','POST'])
def gift():
loggedin_user = session.get('user')
if loggedin_user:
if request.method == 'GET':
mygifts = db.session.query(Gift).all()
return render_template('user/gift.html',mygifts=mygifts)
else:
#retrieve form data
selectedgift = request.form.getlist('item')
if selectedgift:
for i in selectedgift:
totalqty = 'quantity'+str(i)
total = request.form.get(totalqty,1)
statement = guest_gift.insert().values(gift_id=i, guest_id=loggedin_user,qty=total)
db.session.execute(statement)
db.session.commit()
flash('Thank you for your donation')
return redirect('/user/profile')
else:
flash('Please select at least one gift item')
return redirect('/user/gift')
else:
        return redirect('/user/login')
@app.route('/about-me')
def about():
pwd = app.config['PASSWORD']
return render_template('user/about.html',pwd=pwd)
@app.route('/addpicture', methods=['POST','GET'])
def uploadpix():
if session.get('user') != None:
if request.method =='GET':
            return render_template('user/upload.html') # 'user/upload.html' is an assumed template name
else:
fileobj = request.files['pix']
original = str(random.random() * 10000000) + fileobj.filename
destination = 'projectapp/static/images/guest/test.jpg'
fileobj.save(destination)
guestid = session.get('user')
guest = db.session.query(Guest).get(guestid)
guest.profile_pix = original
db.session.commit()
return redirect('/user/profile')
else:
        return redirect('/user/login')
@app.route('/user/addpicture', methods=['GET','POST'])
def addpicture():
if session.get('user') != None:
if request.method=='GET':
return render_template('user/upload.html')
else: #form is submitted
fileobj = request.files['pic']
if fileobj.filename == '':
flash('Please select a file')
return redirect(url_for('addpicture'))
else:
#get the file extension, #splits file into 2 parts on the extension
name, ext = os.path.splitext(fileobj.filename)
allowed_extensions=['.jpg','.jpeg','.png','.gif']
if ext not in allowed_extensions:
                    flash(f'Extension {ext} is not allowed')
return redirect(url_for('addpicture'))
else:
sample_xters = random.sample(string.ascii_lowercase,10)
newname = ''.join(sample_xters) + ext
destination = 'projectapp/static/images/guest/'+newname
fileobj.save(destination)
##save the details in the db
guestid = session.get('user')
guest = db.session.query(Guest).get(guestid)
guest.profile_pix=newname
db.session.commit()
return redirect('/user/profile')
else:
return redirect(url_for('login'))
@app.route('/user/question')
def contact():
if session.get('user') != None:
return render_template('quest.html')
else:
return redirect(url_for('login'))
@app.route('/user/question-ajax')
def questionajax():
if session.get('user') != None:
return render_template('quest.html')
else:
return redirect(url_for('login'))
@app.route('/user/submitquestion',methods=['POST','GET'])
def submitquestion():
loggedin = session.get('user')
if loggedin != None:
quest = request.form.get('quest')
q = Questions(question=quest,guest_id=loggedin)
db.session.add(q)
db.session.commit()
flash('thank you for asking')
        return redirect(url_for('profile'))
else:
return redirect(url_for('login'))
@app.route('/user/submitajax',methods=['POST','GET'])
def submitajax():
loggedin = session.get('user')
if loggedin != None:
quest = request.form.get('quest')
first = request.form.get('fname')
last = request.form.get('lname')
csrf_token = request.form.get('csrf_token')
pixobj = request.files['pix']
filename = pixobj.filename
q = Questions(question=quest,guest_id=loggedin)
db.session.add(q)
db.session.commit()
return f"Thank you {first} {last}, Your Questions has been asked, the CSRF TOKEN IS {csrf_token}, and file is {filename}"
else:
return "You need to log in to ask a question"
@app.route('/user/availability', methods=["GET","POST"])
def check_availability():
if request.method == 'GET':
records = db.session.query(State).all()
return render_template('user/test.html',record=records)
else:
user = request.form.get('user')
deets = db.session.query(Guest).filter(Guest.guest_email==user).all()
if deets:
rsp = {"msg":"You have registered with this email", "status":"failed"}
return json.dumps(rsp)
else:
rsp = {"msg":"Username available", "status":"success"}
return json.dumps(rsp)
@app.route('/user/lga')
def lga():
state = request.args.get('id')
data = db.session.query(Lga).filter(Lga.state_id==state).all()
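    # Build an HTML <select> fragment for the caller's AJAX handler to inject
    # directly into the page, rather than returning JSON.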
tosend = "<select class='form-control' name='>"
for t in data:
tosend= tosend + f"<option>{t.lga_name}</option>"
tosend=tosend+"</select>"
return tosend
@app.route('/user/donate-cash/',methods=["POST","GET"])
def donate_cash():
loggedin = session.get('user')
if loggedin:
if request.method == 'GET':
return render_template('user/cashin.html')
else:
cash =request.form.get('amt')
return "Form submitteed here "
else:
abort(403)
@app.route('/user/payup')
def paystack():
loggedin = session.get('user')
if loggedin:
return "Transactions completed"
else:
abort(403)
def refno():
    sample_xters = random.sample(string.digits,10)
newname=''.join(sample_xters)
return newname
@app.route('/user/paycash',methods=["GET","POST"])
def paycash():
if session.get('user') != None:
if request.method=="GET":
return render_template('user/cashins.html')
else:
user = session.get('user')
cashed = request.form.get('amt',0)
ref=refno()
session['trxref'] = ref
inst = Transaction(trx_guestid=user,trx_amt=cashed,trx_status='pending',trx_ref=ref)
db.session.add(inst)
db.session.commit()
return redirect("confirmpay")
else:
return redirect(url_for('login'))
@app.route('/user/confirmpay',methods=['GET','POST'])
def confirmpay():
if session.get('user') !=None and session.get('trxref') !=None:
ref = session.get('trxref')
deets = db.session.query(Transaction).filter(Transaction.trx_ref==ref).first()
if request.method=='GET':
return render_template('user/confirmpay.html',deets=deets)
else:
#connect to paystack endpoint
amount = deets.trx_amt * 100
            email = deets.guest.guest_email # the Guest model's field is guest_email
headers = {"Content-Type": "application/json","Authorization":"Bearer sk_test_c41b8a36f3b1e4cec6e476893c630d7a171b7d7a"}
data = {"reference": ref, "amount": amount, "email": email}
response = requests.post('https://api.paystack.co/transaction/initialize', headers=headers, data=json.dumps(data))
rsp = json.loads(response.text)
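            # Paystack's initialize endpoint responds with JSON shaped like
            # {"status": true, "data": {"authorization_url": ...}}; the user
            # is redirected there to complete payment.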
if rsp.get('status') == True:
                payurl = rsp['data']['authorization_url']
return redirect(payurl)
else:
return redirect(url_for('paycash'))
else:
return redirect(url_for('login'))
@app.route('/user/testmail')
def testmail():
msg = Message("Testing Mail","[email protected]",
recipients=['[email protected]'],body='Test Mail')
fp = open('requirements.txt')
msg.html = "<div><h1>Welcome user</h1><p>You have successfully logged in clents are waiting for you</p><hr> Signed by Management</div><img src='select the image from the network if necessary'>"
msg.attach("requirements.txt", "application/txt", fp.read())
check = mail.send(msg)
if check:
return "Mail was sent"
else:
return "Checking Mail sending failed...."
| abrajoe/project | projectapp/myroutes/user.py | user.py | py | 11,939 | python | en | code | 0 | github-code | 6 |
[
{
"api_name": "flask.request.method",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db.session.query",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "projectapp.mymodel.State",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "projectapp.db.session",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "werkzeug.security.generate_password_hash",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "projectapp.mymodel.Guest",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session.add",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.commit",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.query",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "projectapp.mymodel.Guest",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "projectapp.db.session",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.query",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "projectapp.mymodel.Document",
"line_number": 46,
"usage_type": "argument"
},
{
"api_name": "projectapp.db.session",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "flask.session.pop",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.query",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "projectapp.mymodel.Guest",
"line_number": 66,
"usage_type": "argument"
},
{
"api_name": "projectapp.db.session",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "projectapp.mymodel.Guest.guest_email",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "werkzeug.security.check_password_hash",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.query",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "projectapp.mymodel.Gift",
"line_number": 88,
"usage_type": "argument"
},
{
"api_name": "projectapp.db.session",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "flask.request.form.getlist",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "projectapp.mymodel.guest_gift.insert",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "projectapp.mymodel.guest_gift",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.execute",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.commit",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "projectapp.app.config",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "projectapp.app",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "flask.request.files",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "random.random",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.query",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "projectapp.mymodel.Guest",
"line_number": 128,
"usage_type": "argument"
},
{
"api_name": "projectapp.db.session",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.commit",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "flask.request.files",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "flask.flash",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "string.ascii_lowercase",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "flask.session.get",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.query",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "projectapp.mymodel.Guest",
"line_number": 165,
"usage_type": "argument"
},
{
"api_name": "projectapp.db.session",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.commit",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "projectapp.mymodel.Questions",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session.add",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.commit",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "flask.request.files",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "projectapp.mymodel.Questions",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session.add",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.commit",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "projectapp.app.route",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.query",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "projectapp.mymodel.State",
"line_number": 230,
"usage_type": "argument"
},
{
"api_name": "projectapp.db.session",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.query",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "projectapp.mymodel.Guest",
"line_number": 235,
"usage_type": "argument"
},
{
"api_name": "projectapp.db.session",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "projectapp.mymodel.Guest.guest_email",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.query",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "projectapp.mymodel.Lga",
"line_number": 249,
"usage_type": "argument"
},
{
"api_name": "projectapp.db.session",
"line_number": 249,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "projectapp.mymodel.Lga.state_id",
"line_number": 249,
"usage_type": "attribute"
},
{
"api_name": "projectapp.app.route",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "random.sample",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "string.digit",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "flask.session.get",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "projectapp.mymodel.Transaction",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session.add",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.commit",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "projectapp.db.session",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "projectapp.db.session.query",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "projectapp.mymodel.Transaction",
"line_number": 308,
"usage_type": "argument"
},
{
"api_name": "projectapp.db.session",
"line_number": 308,
"usage_type": "attribute"
},
{
"api_name": "projectapp.db",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "projectapp.mymodel.Transaction.trx_ref",
"line_number": 308,
"usage_type": "attribute"
},
{
"api_name": "flask.request.method",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "projectapp.app.route",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "flask_mail.Message",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "projectapp.mail.send",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "projectapp.mail",
"line_number": 338,
"usage_type": "name"
},
{
"api_name": "projectapp.app.route",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "projectapp.app",
"line_number": 331,
"usage_type": "name"
}
] |
39404813463 |
from django.urls import path
from .views import (
ColaboradorList,
ColaboradorUpdate,
ColaboradorDelete,
ColaboradorCreate,
ColaboradorReport,
HtmlPdf,
)
urlpatterns = [
path('listar', ColaboradorList.as_view(), name='list_colaborador'),
path('criar', ColaboradorCreate.as_view(), name='create_colaborador'),
path('editar/<int:pk>', ColaboradorUpdate.as_view(), name='update_colaborador'),
path('excluir/<int:pk>', ColaboradorDelete.as_view(), name='delete_colaborador'),
path('relatorio', ColaboradorReport, name='report_colaborador'),
path('relatorio_html', HtmlPdf.as_view(), name='report_colaborador_html'),
]
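# These routes would typically be mounted from the project-level urls.py,
# e.g. (the prefix is hypothetical):
#   path('colaboradores/', include('apps.colaboradores.urls'))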
| fabiogpassos/GRH | apps/colaboradores/urls.py | urls.py | py | 664 | python | es | code | 0 | github-code | 6 |
[
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.ColaboradorList.as_view",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.ColaboradorList",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.ColaboradorCreate.as_view",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.ColaboradorCreate",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.ColaboradorUpdate.as_view",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.ColaboradorUpdate",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.ColaboradorDelete.as_view",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.ColaboradorDelete",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "views.ColaboradorReport",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "views.HtmlPdf.as_view",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "views.HtmlPdf",
"line_number": 18,
"usage_type": "name"
}
] |
43431937253 |
#!/usr/bin/env python3
""" Evaluator """
import sys
import tensorflow as tf
from utils import decode_img, image_patches, write_tensor_as_image
from model import image_diff, UPSCALER_FACTOR
def main():
""" Main function """
try:
image_path = sys.argv[1]
    except IndexError:
print("Usage: {} <image path>".format(sys.argv[0]))
exit(-1)
try:
model_path = sys.argv[2]
    except IndexError:
model_path = './saved_model'
PATCH_SIZE = 240 // UPSCALER_FACTOR
N_CHANNELS = 3
model = tf.keras.models.load_model(model_path)
image = decode_img(image_path, N_CHANNELS)
patches = image_patches(image, PATCH_SIZE, PATCH_SIZE, N_CHANNELS)
model_out = model(patches)
for idx, (patch_in, patch_out) in enumerate(zip(patches, model_out)):
write_tensor_as_image("{}a.png".format(idx), patch_in)
write_tensor_as_image("{}b.png".format(idx), patch_out)
#image = tf.expand_dims(image, axis=0)
#model_out = model(image)
#model_out = tf.squeeze(model_out, axis=0)
#write_tensor_as_image("out.png", model_out)
if __name__ == "__main__":
main()
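# Example run (paths are illustrative):
#   ./eval.py input.png ./saved_model
# This writes one <idx>a.png / <idx>b.png pair per input/output patch.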
| Masterchef365/ENHANCE | eval.py | eval.py | py | 1,123 | python | en | code | 0 | github-code | 6 |
[
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "model.UPSCALER_FACTOR",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "utils.decode_img",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "utils.image_patches",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "utils.write_tensor_as_image",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "utils.write_tensor_as_image",
"line_number": 32,
"usage_type": "call"
}
] |
22546077851 |
import pygame as pg
import enum
import random
import numpy as np
import pprint
from collections import Counter
from sudokuBibli import list_sudokus
class Sudoku:
"""
    Class for the sudoku
"""
def __init__ (self):
self.SOLUTIONS = []
        self.grille_initiale = random.choice(list_sudokus) # The initial grid, whose given numbers cannot be changed
        # self.grille_initiale = list_sudokus[2] # Fixed initial grid (for testing)
        self.grille = [row[:] for row in self.grille_initiale] # Working grid, updated as the sudoku is solved
# Dimensions
self.number_of_rows = len(self.grille)
self.number_of_columns = len(self.grille[0])
        if self.number_of_columns != self.number_of_rows: raise Exception("The sudoku given is not a square!")
self.size = (self.number_of_rows, self.number_of_columns)
self.number_of_carres = None
def addNumber(self, position, number):
"""
Input : a tuple of coordinates (x, y) and a number
Add a number to the grid
"""
x, y = position
#! Exceptions
        if not 1<=number<=self.number_of_rows: raise Exception("Number incorrect!") # The number is not between 1 and 9
        if not 0<=x<self.number_of_rows: raise Exception("X coordinate isn't in the sudoku") # The X coordinate is out of range
        if not 0<=y<self.number_of_columns: raise Exception("Y coordinate isn't in the sudoku") # The Y coordinate is out of range
        if self.grille_initiale[x][y] != 0: raise Exception("This number cannot be modified!") # Givens cannot be changed
if self.possible(position, number) :
self.grille[x][y] = number
else :
raise Exception("It is not possible !")
def rowCorrect(self, row):
"""
Input : the index of a row
Output : if this row is correct
"""
        row = [i for i in self.grille[row] if i!=0] # the row without its zeros
        return len(set(row)) == len(row) # correct if the list has no duplicates (other than 0)
def columnCorrect(self, columnID):
"""
Input : the index of a column
Output : if this column is correct
"""
column = [self.grille[i][columnID] for i in range(len(self.grille)) if self.grille[i][columnID]!=0]
        return len(set(column)) == len(column) # no duplicates if the de-duplicated list keeps the same length
def giveCarre(self, carreId):
"""
Input : a tuple of positions for a square
Output : a one-dimensional list of all values inside the square
"""
x, y = carreId
        return [self.grille[i][j] for i in range (x*3, x*3+3) for j in range (y*3, y*3+3)]
def carreCorrect(self, carreID):
"""
https://www.guru99.com/python-counter-collections-example.html --> idée
"""
carre = [i for i in self.giveCarre(carreID) if i!=0]
return len(set(carre)) == len(carre)
def isCorrect(self):
"""
        Output : whether the grid is correct
"""
if all ([self.rowCorrect(i) for i in range (len(self.grille[0]))]) :
if all ([self.columnCorrect(i) for i in range (len(self.grille))]) :
if all ([self.carreCorrect((x, y)) for x in range (len(self.grille)//3) for y in range (len(self.grille)//3)]) :
return True
return False
def possible(self, coordinates, number):
"""
return if the choice is possible
"""
y,x = coordinates
# Verify row
for i in range(self.number_of_columns):
if self.grille[y][i] == number:
return False
# Verify column
for i in range(self.number_of_rows):
if self.grille[i][x] == number:
return False
# Verify square
x0 = (x//3)*3
y0 = (y//3)*3
for i in range(3):
for j in range(3):
if self.grille[y0+i][x0+j] ==number:
return False
return True
def solve(self):
"""
Solve the sudoku
"""
for y in range(9):
for x in range(9):
if self.grille[y][x] == 0:
for n in range(1,10):
if self.possible((y,x), n):
# print(self)
self.grille[y][x] = n
draw_sudoku(self)
self.solve()
                            self.grille[y][x] = 0 # Backtrack to the starting point
return
print(self)
input("More")
def create(self):
"""
Creates a 9x9 sudoku
"""
pass
def __repr__(self):
"""
        Display the grid
"""
return str(np.matrix(self.grille))
# return str(pprint.pprint(self.grille))
# if __name__ == "__main__":
# sudoku = Sudoku()
# while True:
# x = eval(input("Give the x"))
# y = eval(input("Give the y"))
# number = eval(input("New Number"))
pg.font.init()
#* ================== COLORS =====================
class Color(enum.Enum):
WHITE = (255,255,255)
BLACK = (0,0,0)
GRAY = (200,200,200)
RED = (255,0,0)
BLUE = (0,0,255)
#* ================== Window =====================
WIDTH, HEIGHT = 750, 750 # Window width and height
WIN = pg.display.set_mode((WIDTH, HEIGHT)) # Create the window
pg.display.set_caption("Sudoku") # Window title
#* ========= FONTS ============
NUMBER_FONT = pg.font.SysFont("comicsans", 40)
def draw_sudoku(sudoku):
"""
Draw a sudoku on the window
"""
x, y = 70,70 # Starting point
ROWS = sudoku.number_of_rows
gap = WIDTH // (ROWS+2)
    WIN.fill(Color.WHITE.value) # White background
    # Draw the grid
for i in range(ROWS+1):
if i % 3 == 0:
col = Color.BLACK.value
else:
col = Color.GRAY.value
pg.draw.line(WIN, col, (x,int(y+i*gap)),(WIDTH-x, y+i*gap), 3) # Horizontal
pg.draw.line(WIN, col, (x+i*gap,y),(x+i*gap,HEIGHT-y), 3) # Vertical
    # Draw the numbers
for index_row, row in enumerate(sudoku.grille):
for index_column, number in enumerate(row):
if number != 0:
if sudoku.grille_initiale[index_row][index_column]!=0:
text = NUMBER_FONT.render(str(number), 1, Color.RED.value)
else:
text = NUMBER_FONT.render(str(number), 1, Color.BLACK.value)
WIN.blit(text, (x+index_column*gap+int(gap*0.4),y+index_row*gap+int(gap*0.4)))
pg.display.update()
def main():
sudoku = Sudoku()
draw_sudoku(sudoku)
input("Start ?")
sudoku.solve()
input("fin")
main()
# def placeNumbers(sudoku_array):
# font = pg.font.Font(None, 36)
# for row in range(9):
# for col in range(9):
# if sudoku_array[row, col] != 0:
# integer = font.render(str(sudoku_array[row, col]), 1, black)
# display.blit(integer, (15 + col * 45, 10 + row * 45))
# pg.display.flip()
| PsychoLeo/Club_Informatique | 4-Sudoku/graphicDisplay.py | graphicDisplay.py | py | 7,659 | python | en | code | 0 | github-code | 6 |
[
{
"api_name": "random.choice",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sudokuBibli.list_sudokus",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "numpy.matrix",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "pygame.font.init",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "enum.Enum",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.line",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.line",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 194,
"usage_type": "attribute"
}
] |
21478478680 |
import logging, datetime, sys
from modules import *
args = parser.parse_args()
start_time = datetime.datetime.now()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# create a file handler for INFO
handler = logging.FileHandler(CONFIG['log_path'] + 'info_the_release_note.log')
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
# create a file handler for DEBUG
debug_handler = logging.FileHandler(CONFIG['log_path'] + 'debug_the_release_note.log')
debug_handler.setLevel(logging.DEBUG)
debug_handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
if args.debug:
print("Now running in debug mode.")
logger.setLevel(logging.DEBUG)
logger.addHandler(debug_handler)
dzr = Deezer()
weekday = datetime.datetime.today().weekday()
# Retrieve users, either from args or a contact list
if args.user:
    args.do_not_send = not args.email
users = [{ 'deezer_user_id': int(user), 'email': args.email } for user in args.user]
else:
try:
users = getContacts(args.contact_list_id) if args.contact_list_id else getContacts(CONFIG['contact_list_id'])
except Exception as e:
logger.info("An error occured while trying to retrieve the contact list.")
logger.debug(e)
sys.exit(2)
logger.info(str(len(users)) + ' users found.')
logger.debug(users)
for user in users:
print("Checking new releases for user id " + str(user['deezer_user_id']) + "...")
logger.info("Checking new releases for user id " + str(user['deezer_user_id']) + "...")
if args.released_since:
released_since = args.released_since
else:
try:
# For weekly users, send new releases on friday only
if weekday != 4 and user['frequency'] == 'weekly':
logger.debug("Skipping this user as he's a weekly user and will only receive new releases on Friday.")
continue
else:
released_since = {
'daily': 1,
'weekly': 7
}.get(user['frequency'], 1)
except KeyError as e:
logger.debug("Frequency setting not found. Fallback to default value.")
released_since = 1
except Exception as e:
logger.debug("An error occured while trying to retrieve the frequency setting:")
logger.debug(e)
continue
try:
new_releases = dzr.getNewReleases(user['deezer_user_id'], released_since)
except IOError as e:
logger.debug("Stopwords and banned artists could not be retrieved.")
logger.debug(e)
sys.exit(2)
except Exception as e:
logger.debug(e)
sys.exit(2)
nb_releases = len(new_releases)
logger.info("User id " + str(user['deezer_user_id']) + " has " + str(nb_releases) + " albums released in the past " + str(released_since) + " days.")
logger.debug(new_releases)
if nb_releases < 1:
continue
# Store new releases into database
try:
db = Database()
db.storeNewReleases(new_releases, user['deezer_user_id'])
del(db)
except Exception as e:
logger.info("An error occured while trying to store the new releases in the database.")
logger.debug(e)
# Send new releases by email
subject = "♩ Have you listened to " + new_releases[0]['artist']['name'] + "'s new album ?"
contenthtml = get_template(new_releases, user['deezer_user_id'])
if not args.do_not_send:
try:
send = sendMail(CONFIG['from_mail'], CONFIG['from_name'], user['email'], subject, contenthtml)
logger.info("Sending email - Status: " + str(send.status_code))
logger.debug(send.headers)
except Exception as e:
logger.info("An error occured while trying to send the mail.")
logger.debug(e)
sys.exit(2)
print('Done')
logger.info("Done in %s seconds " % (datetime.datetime.now() - start_time).total_seconds())
|
greird/the-release-note
|
the-release-note.py
|
the-release-note.py
|
py
| 3,711 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "logging.FileHandler",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "logging.DEBUG",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.today",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 113,
"usage_type": "attribute"
}
] |
24931817284
|
from json import dumps, loads
from State import State
class Api:
"""
A class that provides methods for encoding and decoding
States to and from JSON strings.
Methods:
- Encode(states: list[State]) -> str:
Encodes a list of State objects to a JSON string.
- Decode(jsonString: str) -> State:
Decodes a JSON string to a State object.
"""
def Encode(states: list[State]) -> str:
"""
Encodes a list of State objects to a JSON string.
Args:
- states (list[State]):
A list of State objects to encode.
Returns:
- str:
A JSON string representing the list of State objects.
"""
return dumps([state.__dict__ for state in states])
def Decode(jsonString: str) -> State:
"""
Decodes a JSON string to a State object.
Args:
- jsonString (str):
A JSON string to decode.
Returns:
- State:
A State object representing the decoded JSON string.
"""
obj = loads(jsonString)
return State(
obj['Board'],
obj['Direction'],
(obj['EmptyPoint']['X'], obj['EmptyPoint']['Y'])
)
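# Minimal round-trip sketch (hypothetical; assumes the State object exposes
# Board, Direction and EmptyPoint attributes matching the JSON keys above):
#   json_str = Api.Encode([some_state])
#   restored = Api.Decode(json_str)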
|
Saeed-Ayman/8-puzzle
|
API.py
|
API.py
|
py
| 1,287 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "State.State",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "State.State",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "State.State",
"line_number": 31,
"usage_type": "name"
}
] |
17529991766
|
import os, glob, asyncio
class CommandDispatcher:
"""Register commands and run them"""
def __init__(self):
self.commands = {}
self.commands_admin = []
self.unknown_command = None
def get_admin_commands(self, bot, conv_id):
"""Get list of admin-only commands (set by plugins or in config.json)"""
commands_admin = bot.get_config_suboption(conv_id, 'commands_admin') or []
return list(set(commands_admin + self.commands_admin))
@asyncio.coroutine
def run(self, bot, event, *args, **kwds):
"""Run command"""
try:
func = self.commands[args[0]]
except KeyError:
if self.unknown_command:
func = self.unknown_command
else:
raise
args = list(args[1:])
try:
yield from func(bot, event, *args, **kwds)
except Exception as e:
print(e)
def register(self, *args, admin=False):
"""Decorator for registering command"""
def wrapper(func):
# Automatically wrap command function in coroutine
func = asyncio.coroutine(func)
self.commands[func.__name__] = func
if admin:
self.commands_admin.append(func.__name__)
return func
# If there is one (and only one) positional argument and this argument is callable,
# assume it is the decorator (without any optional keyword arguments)
if len(args) == 1 and callable(args[0]):
return wrapper(args[0])
else:
return wrapper
def register_unknown(self, func):
"""Decorator for registering unknown command"""
# Automatically wrap command function in coroutine
func = asyncio.coroutine(func)
self.unknown_command = func
return func
# Create CommandDispatcher singleton
command = CommandDispatcher()
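# Hypothetical plugin usage sketch (the command body depends on the bot API,
# so it is only indicative):
#   @command.register(admin=True)
#   def restart(bot, event, *args):
#       ...  # registered under its function name; wrapped in a coroutine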
# Build list of commands
_plugins = glob.glob(os.path.join(os.path.dirname(__file__), "*.py"))
__all__ = [os.path.splitext(os.path.basename(f))[0] for f in _plugins
if os.path.isfile(f) and not os.path.basename(f).startswith("_")]
# Load all commands
from hangupsbot.commands import *
|
xmikos/hangupsbot
|
hangupsbot/commands/__init__.py
|
__init__.py
|
py
| 2,229 |
python
|
en
|
code
| 105 |
github-code
|
6
|
[
{
"api_name": "asyncio.coroutine",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "asyncio.coroutine",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "asyncio.coroutine",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 65,
"usage_type": "call"
}
] |
11353299783
|
"""
Problem Statement
Given a binary tree, populate an array to represent its level-by-level traversal.
You should populate the values of all nodes of each level from left to right in separate sub-arrays.
"""
from collections import deque
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
def traverse(root):
result = []
deq = deque()
if root:
deq.append(root)
while deq:
length = len(deq)
node_list = []
for _ in range(length):
current_node = deq.popleft()
node_list.append(current_node.val)
if current_node.left:
deq.append(current_node.left)
if current_node.right:
deq.append(current_node.right)
if node_list:
result.append(node_list)
return result
def main():
root = TreeNode(12)
root.left = TreeNode(7)
root.right = TreeNode(1)
root.left.left = TreeNode(9)
root.right.left = TreeNode(10)
root.right.right = TreeNode(5)
print("Level order traversal: " + str(traverse(root)))
main()
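# Expected output: Level order traversal: [[12], [7, 1], [9, 10, 5]]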
|
jihoonyou/problem-solving
|
Educative/bfs/example1.py
|
example1.py
|
py
| 1,137 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 17,
"usage_type": "call"
}
] |
7165790234
|
import argparse
import logging
import sys
from itertools import chain
from logging import getLogger
from typing import Iterable, Optional, Union
from competitive_verifier import oj
from competitive_verifier.arg import add_verify_files_json_argument
from competitive_verifier.error import VerifierError
from competitive_verifier.log import configure_logging
from competitive_verifier.models import (
ProblemVerification,
VerificationFile,
VerificationInput,
)
from competitive_verifier.resource import ulimit_stack
logger = getLogger(__name__)
UrlOrVerificationFile = Union[str, VerificationFile]
def parse_urls(
input: Union[UrlOrVerificationFile, Iterable[UrlOrVerificationFile]]
) -> Iterable[str]:
def parse_single(url_or_file: UrlOrVerificationFile) -> Iterable[str]:
if isinstance(url_or_file, str):
return (url_or_file,)
else:
return enumerate_urls(url_or_file)
if isinstance(input, (str, VerificationFile)):
return parse_single(input)
return chain.from_iterable(parse_single(uf) for uf in input)
def enumerate_urls(file: VerificationFile) -> Iterable[str]:
for v in file.verification:
if isinstance(v, ProblemVerification):
yield v.problem
def run_impl(
input: Union[UrlOrVerificationFile, Iterable[UrlOrVerificationFile]],
check: bool = False,
group_log: bool = False,
) -> bool:
result = True
try:
ulimit_stack()
except Exception:
logger.warning("failed to increase the stack size[ulimit]")
for url in parse_urls(input):
if not oj.download(url, group_log=group_log):
result = False
if check and not result:
raise VerifierError("Failed to download")
return result
def run(args: argparse.Namespace) -> bool:
logger.debug("arguments=%s", vars(args))
logger.info("verify_files_json=%s", str(args.verify_files_json))
logger.info("urls=%s", args.urls)
files: list[VerificationFile] = []
if args.verify_files_json:
verification = VerificationInput.parse_file_relative(args.verify_files_json)
files = list(verification.files.values())
return run_impl(files + args.urls, group_log=True)
def argument(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
add_verify_files_json_argument(parser, required=False)
parser.add_argument(
"urls",
nargs="*",
help="A list of problem URL",
)
return parser
def main(args: Optional[list[str]] = None) -> None:
try:
configure_logging(logging.INFO)
parsed = argument(argparse.ArgumentParser()).parse_args(args)
if not run(parsed):
sys.exit(1)
except Exception as e:
sys.stderr.write(str(e))
sys.exit(2)
if __name__ == "__main__":
main()
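# Hypothetical CLI sketch (the long option name is assumed from
# args.verify_files_json; URLs are passed as positional arguments):
#   python -m competitive_verifier.download.main --verify-files-json verify_files.json https://example.judge/problem/1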
|
competitive-verifier/competitive-verifier
|
src/competitive_verifier/download/main.py
|
main.py
|
py
| 2,823 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "competitive_verifier.models.VerificationFile",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "competitive_verifier.models.VerificationFile",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "competitive_verifier.models.VerificationFile",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "competitive_verifier.models.ProblemVerification",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": "typing.Iterable",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "competitive_verifier.resource.ulimit_stack",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "competitive_verifier.oj.download",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "competitive_verifier.oj",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "competitive_verifier.error.VerifierError",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "argparse.Namespace",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "competitive_verifier.models.VerificationFile",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "competitive_verifier.models.VerificationInput.parse_file_relative",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "competitive_verifier.models.VerificationInput",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "competitive_verifier.arg.add_verify_files_json_argument",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "competitive_verifier.log.configure_logging",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 93,
"usage_type": "call"
}
] |
39732132661
|
from __future__ import absolute_import, division, print_function
import math
import csv
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import random
rows=[]
id=[]
datadir = "../../data/"
with open(datadir+"colaus1.focus.raw.csv", "r") as csvFile:
reader = csv.reader(csvFile)
for row in reader:
r = [float(n) for n in row]
rows.append(r[1:])
id.append(int(r[0]))
csvFile.close()
ppm = rows[0]
rows = rows[1:]
id = id[1:] # remove the unneeded leading 0
# plot to visualize the data
# plt.plot(rows[0], rows[15])
# plt.title(id[15])
# plt.xlabel("ppm")
# plt.axis(ymax = 2500000, ymin = 0, xmin=8.51, xmax=0.8)
# plt.show()
s = []
i = []
with open(datadir+"sex.csv", "r") as csvFile:
reader2 = csv.reader(csvFile)
for row in reader2:
s.append(row[1])
i.append(row[0])
sex = [int(n) for n in s[1:]]
id2 = [int(n) for n in i[1:]]
csvFile.close()
######################## Data preparation ################################################
# divide the data by the max --> values between 0 and 1
sexe = []
i=0
while i < len(id) :
n=0
while n < len(id2) :
if id2[n] == id[i]:
sexe.append(sex[n])
n += 1
i += 1
m=[]
for n in rows :
m.append(max(n))
sexe=np.array(sexe)
spectro = []
s = []
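# global maximum over all spectra; note that the next line rebinds the
# built-in max(), so max() is unavailable afterwards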
max = max(m)
i = 0
while i < len(rows) :
for n in rows[i] :
s.append(n/max)
spectro.append(s)
s = []
i += 1
spectro = np.array(spectro)
sexe = np.array(sexe)
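# Equivalent vectorized sketch of the normalization loop above:
#   spectro = np.array(rows) / max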
# randomize the samples
# alternatively, use numpy.random.choice(list, how many we want, replace=False)
t = random.sample(range(0,len(spectro)), 874) # 90% for the training set
e = list(range(0,len(spectro)))
for i in t:
for j in t:
if j == i:
e.remove(j)
v = random.sample(t, 88) # 10% of the training set for the validation set
for i in v:
for j in t:
if j == i:
t.remove(j)
train_spectro = spectro[t]
train_sex = sexe[t]
val_spectro = spectro[v]
val_sex = sexe[v]
test_spectro = spectro[e]
test_sex = sexe[e]
################## Model creation ########################################################
model = keras.Sequential()
model.add(keras.layers.Dense(80, activation=tf.nn.relu, input_shape=(1642,)))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.summary()
################## Compile the model before training #####################################
model.compile(optimizer='adam', # how the model is updated based on the data it sees and its loss function
              loss='binary_crossentropy', # measures model accuracy during training; we want to minimize it so the model heads in the right direction
              metrics=['accuracy'])
# accuracy: the fraction of the samples that are correctly classified
history = model.fit(train_spectro,
train_sex,
epochs=60,
batch_size=10,
validation_data=(val_spectro, val_sex),
verbose = 1)
################## Model evaluation ######################################################
print("\n")
print("Evaluation :")
results = model.evaluate(test_spectro, test_sex)
history_dict = history.history
history_dict.keys()
acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss', color ="blue")
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss', color ="blue")
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc', color ="blue")
plt.plot(epochs, val_acc, 'b', label='Validation acc', color ="blue")
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
|
arnaud456/deep-learning-RMN-UNIL
|
Phenotype_sex_marine.py
|
Phenotype_sex_marine.py
|
py
| 4,016 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "csv.reader",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.Sequential",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "tensorflow.nn",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "tensorflow.nn",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 166,
"usage_type": "name"
}
] |
19757814729
|
# network.messenger.py
from __future__ import annotations
from collections import defaultdict
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING
from typing import overload
import tinydb as tdb
from config.base import APP_USERDATA_DIR
from storage.tinyDao import TinyDao
if TYPE_CHECKING:
from typing import ClassVar, List
from typing import overload
from storage.base import Dao
from network.connection import Request
from shop.product import Product
from shop.shop import Shop
# TODO unit test
class Discord:
def __init__(self, request: Request, repo: Repo = None):
self._request = request
self._repo = repo or Repo() # Fallback to default repo if not given
self._payload = defaultdict(dict)
@overload
async def send(self, productMsg: Product, shop: Shop):
...
@overload
async def send(self, logMsg: str):
...
@overload
async def send(self, errorMsg: str):
...
async def send(self, **kwargs):
productMsg = kwargs.get("productMsg")
logMsg = kwargs.get("logMsg")
errorMsg = kwargs.get("errorMsg")
shop = kwargs.get("shop")
""" Argument is mandatory when productMsg is given """
apiEndpoint = self._repo.findWebhookApiEndpoint().rstrip("/")
if productMsg:
msgConfig = self._repo.findProductMessageConfig()
self._setProductPayload(msgConfig, product=productMsg, shop=shop)
elif logMsg:
msgConfig = self._repo.findLogMessageConfig()
self._setLogPayload(msgConfig, msgText=logMsg)
elif errorMsg:
msgConfig = self._repo.findErrorMessageConfig()
self._setErrorPayload(msgConfig, msgText=errorMsg)
else:
raise AttributeError("None of the expected arguments were given. "
f"Arguments are: {kwargs}")
endpoint = "/".join((apiEndpoint, msgConfig.user, msgConfig.token))
# Prepare request
self._request.configure(
timeout=msgConfig.timeout,
maxRetries=msgConfig.maxRetries,
useRandomProxy=msgConfig.useRandomProxy)
postParams = self._request.Params(
url=endpoint,
data=self._payload,
headers={"Content-Type": "application/json"})
# Send message
await self._request.post(params=postParams)
def _setProductPayload(self, msgConfig: MessageConfig, product: Product, shop: Shop):
if not product:
raise AttributeError(f"No 'product' given. Actual value: {product}")
if not shop:
raise AttributeError(f"No 'shop' given. Actual value: {shop}")
fields = []
if product.basePrice:
fields.append({"name": "Price", "value": product.getPriceWithCurrency()})
if product.sizes:
sizeBlock = [f"{size.sizeEU}" for size in product.sizes if size.isInStock]
if sizeBlock:
fields.append({"name": "Sizes", "value": "\n".join(sizeBlock)})
self.setPayload(username=msgConfig.username,
title=product.name,
description=shop.name,
link=product.url,
thumbnailURL=product.urlThumb,
footer="️Webtomator © 2020 dbyte solutions",
fields=fields)
def _setLogPayload(self, msgConfig: MessageConfig, msgText: str):
self.setPayload(username=msgConfig.username, content=f"🔹{msgText}")
def _setErrorPayload(self, msgConfig: MessageConfig, msgText: str):
self.setPayload(username=msgConfig.username, content=f"❗️{msgText}")
def setPayload(self, **kwargs):
username: str = kwargs.get("username")
content: str = kwargs.get("content")
title: str = kwargs.get("title")
description: str = kwargs.get("description")
link: str = kwargs.get("link")
thumbnailURL: str = kwargs.get("thumbnailURL")
fields: dict = kwargs.get("fields")
footer: str = kwargs.get("footer")
data = dict()
# For common data structure, see
# https://discordapp.com/developers/docs/resources/webhook#execute-webhook
data["username"] = username or ""
data["content"] = content or ""
embed = dict()
# For 'embed' data structure, see
# https://discordapp.com/developers/docs/resources/channel#embed-object
if kwargs.get("title"):
embed.update({"title": title})
if description:
embed.update({"description": description})
if link:
embed.update({"url": link})
if thumbnailURL:
embed.update({"thumbnail": {"url": thumbnailURL}})
if fields:
embed.update({"fields": fields})
if footer:
embed.update({"footer": {"text": footer}})
# We may leave this out when there are no embeds
if embed:
data["embeds"] = list()
data["embeds"].append(embed)
self._payload = data
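        # Illustrative shape of the resulting payload (keys follow the Discord
        # webhook schema linked above):
        #   {"username": "...", "content": "...",
        #    "embeds": [{"title": "...", "footer": {"text": "..."}}]}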
# TODO unit test
class DiscordTinyDao(TinyDao):
_DEFAULT_PATH: ClassVar = APP_USERDATA_DIR / "Messengers.json"
_TABLE_NAME: ClassVar = "Discord"
def __init__(self, path: Path = None):
path = path or self._DEFAULT_PATH
super().__init__(path=path, table=self._TABLE_NAME)
@overload
def find(self, apiEndpointByType: str) -> str:
...
@overload
def find(self, messageConfig: str) -> List[MessageConfig]:
...
def find(self, **kwargs):
apiEndpointByType: str = kwargs.get("apiEndpointByType")
messageConfig: str = kwargs.get("messageConfig")
if messageConfig:
return self._findMessageConfig(configName=messageConfig)
elif apiEndpointByType:
return self._findApiEndpointByType(apiType=apiEndpointByType)
else:
raise AttributeError(f"None of the expected kwargs were given. kwargs are: {kwargs}")
def _findMessageConfig(self, configName: str) -> MessageConfig:
query = tdb.Query().configName == configName # prepare statement
results = super().find(condition=query) # raises
try:
decodedMessageConfig = MessageConfig(**results[0])
return decodedMessageConfig
except Exception as e:
raise LookupError(f"No message configuration found. "
f"Search value: '{configName}'. {e}")
def _findApiEndpointByType(self, apiType: str) -> str:
query = tdb.Query().apiType == apiType # prepare statement
results = super().find(condition=query) # raises
try:
endpoint = results[0].get("apiEndpoint")
return endpoint
except Exception as e:
raise LookupError(f"No configured API for type '{apiType}' found. {e}")
# TODO unit test
class Repo:
def __init__(self, dao: Dao = DiscordTinyDao()):
self._dao = dao
def findWebhookApiEndpoint(self) -> str:
with self._dao as dao:
endpoint = dao.find(apiEndpointByType="webhook")
return endpoint
def findProductMessageConfig(self) -> MessageConfig:
with self._dao as dao:
msgConfig = dao.find(messageConfig="product-msg-config")
return msgConfig
def findErrorMessageConfig(self) -> MessageConfig:
with self._dao as dao:
msgConfig = dao.find(messageConfig="error-msg-config")
return msgConfig
def findLogMessageConfig(self) -> MessageConfig:
with self._dao as dao:
msgConfig = dao.find(messageConfig="log-msg-config")
return msgConfig
# TODO unit test
@dataclass
class MessageConfig:
"""
Note: Attribute names must exactly correspond with keys in JSON document.
"""
configName: str
user: str
token: str
timeout: int
maxRetries: int
useRandomProxy: bool
username: str
|
dbyte/WebtomatorPublicEdition
|
webtomator/network/messenger.py
|
messenger.py
|
py
| 8,076 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "network.connection.Request",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "shop.product.Product",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "shop.shop.Shop",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "typing.overload",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.overload",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typing.overload",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "shop.product",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "shop.product",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "shop.product.Product",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "shop.shop.Shop",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "shop.product",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "shop.product",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "shop.product.name",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "shop.product",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "storage.tinyDao.TinyDao",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "config.base.APP_USERDATA_DIR",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "typing.overload",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "typing.overload",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "tinydb.Query",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "tinydb.Query",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "storage.base.Dao",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 237,
"usage_type": "name"
}
] |
14637149631
|
import os
import tkinter as tk
import tkinter.ttk as ttk
import time
import sys
import traceback
from functools import partial
from datetime import datetime
from mqttk.constants import CONNECT, COLOURS
class TopicBrowser(ttk.Frame):
def __init__(self, master, config_handler, log, root, *args, **kwargs):
super().__init__(master=master, *args, **kwargs)
self.config_handler = config_handler
self.log = log
self.subscription_frames = {}
self.color_carousel = -1
self.current_connection = None
self.last_connection = None
self.current_subscription = None
self.root = root
self.mqtt_manager = None
self.message_id_counter = 0
self.individual_topics = 0
# Subscribe frame
self.topic_browser_bar_frame = ttk.Frame(self, height=1)
self.topic_browser_bar_frame.pack(anchor="nw", side=tk.TOP, fill=tk.X)
# Subscribe selector combobox
self.subscribe_selector = ttk.Combobox(self.topic_browser_bar_frame, width=30, exportselection=False)
self.subscribe_selector.pack(side=tk.LEFT, padx=3, pady=3)
self.subscribe_selector["values"] = []
# Subscribe button
self.browse_button = ttk.Button(self.topic_browser_bar_frame, width=10, text="Browse")
self.browse_button.pack(side=tk.LEFT, padx=3, pady=3)
self.browse_button["command"] = self.add_subscription
# Stop button
self.stop_button = ttk.Button(self.topic_browser_bar_frame, width=10, text="Stop", state="disabled")
self.stop_button.pack(side=tk.LEFT, padx=3, pady=3)
self.stop_button["command"] = self.on_unsubscribe
self.stat_label = ttk.Label(self.topic_browser_bar_frame)
self.stat_label.pack(side=tk.LEFT, padx=3, pady=3)
# Flush messages button
self.flush_messages_button = ttk.Button(self.topic_browser_bar_frame, text="Clear topics")
self.flush_messages_button.pack(side=tk.RIGHT, padx=3)
self.flush_messages_button["command"] = self.flush_messages
# Filter retained checkbox
self.filter_retained = tk.IntVar()
self.filter_retained_checkbox = ttk.Checkbutton(self.topic_browser_bar_frame,
text="Ignore retained messages",
variable=self.filter_retained,
offvalue=0,
onvalue=1)
self.filter_retained_checkbox.pack(side=tk.RIGHT, padx=3)
self.treeview_frame = ttk.Frame(self)
self.treeview_frame.pack(expand=1, fill="both", pady=2, padx=2)
self.topic_treeview = ttk.Treeview(self.treeview_frame, columns=("qos", "retained", "last_message", "payload"), show="tree headings")
self.topic_treeview.heading('#0', text='Topic')
self.topic_treeview.column('#0', minwidth=300, width=300, stretch=tk.NO)
self.topic_treeview.heading('qos', text='QoS')
self.topic_treeview.column('qos', minwidth=50, width=50, stretch=tk.NO)
self.topic_treeview.heading('retained', text='Retained')
self.topic_treeview.column('retained', minwidth=70, width=80, stretch=tk.NO)
self.topic_treeview.heading('last_message', text='Last message')
self.topic_treeview.column('last_message', minwidth=70, width=90, stretch=tk.NO)
self.topic_treeview.heading('payload', text='Payload')
self.topic_treeview.column('payload', minwidth=300, width=900, stretch=tk.NO)
if sys.platform == "darwin":
self.topic_treeview.bind("<Button-2>", self.popup)
if sys.platform == "linux":
self.topic_treeview.bind("<Button-3>", self.popup)
if sys.platform == "win32":
self.topic_treeview.bind("<Button-3>", self.popup)
self.vertical_scrollbar = ttk.Scrollbar(self.treeview_frame, orient="vertical", command=self.topic_treeview.yview)
self.vertical_scrollbar.pack(side=tk.RIGHT, fill="y")
self.topic_treeview.configure(yscrollcommand=self.vertical_scrollbar.set)
self.topic_treeview.pack(fill="both", side=tk.LEFT, expand=1)
self.horizontal_scrollbar = ttk.Scrollbar(self, orient="horizontal", command=self.topic_treeview.xview)
self.horizontal_scrollbar.pack(side=tk.BOTTOM, fill="x")
self.topic_treeview.configure(xscrollcommand=self.horizontal_scrollbar.set)
self.popup_menu = tk.Menu(self, tearoff=0)
self.popup_menu.add_command(label="Copy topic", command=self.copy_topic)
self.popup_menu.add_command(label="Copy payload", command=self.copy_payload)
def interface_toggle(self, connection_state, mqtt_manager, current_connection):
# Subscribe tab items
self.mqtt_manager = mqtt_manager
if connection_state != CONNECT:
self.last_connection = self.current_connection
else:
if self.last_connection != current_connection:
self.flush_messages()
self.current_connection = current_connection
self.browse_button.configure(state="normal" if connection_state is CONNECT else "disabled")
self.subscribe_selector.configure(state="normal" if connection_state is CONNECT else "disabled")
self.current_subscription = None
def get_color(self, topic):
colour = self.config_handler.get_subscription_colour(self.current_connection, topic)
if colour is not None:
return colour
self.color_carousel += 1
if self.color_carousel > len(COLOURS):
self.color_carousel = 0
return COLOURS[self.color_carousel]
def add_subscription(self):
topic = self.subscribe_selector.get()
if topic != "":
try:
callback = partial(self.on_mqtt_message, subscription_pattern=topic)
callback.__name__ = "MyCallback" # This is to fix some weird behaviour of the paho client on linux
self.mqtt_manager.add_subscription(topic_pattern=topic,
on_message_callback=callback)
except Exception as e:
self.log.exception("Failed to subscribe!", e)
return
if self.subscribe_selector["values"] == "":
self.subscribe_selector["values"] = [topic]
elif topic not in self.subscribe_selector['values']:
self.subscribe_selector['values'] += (topic,)
self.config_handler.add_subscription_history(self.current_connection,
topic,
self.get_color(topic))
self.current_subscription = topic
self.browse_button["state"] = "disabled"
self.stop_button["state"] = "normal"
def load_subscription_history(self):
self.subscribe_selector.configure(
values=self.config_handler.get_subscription_history_list(self.current_connection))
self.subscribe_selector.set(self.config_handler.get_last_subscribe_used(self.current_connection))
def on_mqtt_message(self, _, __, msg, subscription_pattern):
try:
if bool(self.filter_retained.get()) and msg.retain == 1:
return
try:
payload_decoded = str(msg.payload.decode("utf-8"))
except Exception:
payload_decoded = msg.payload
time_string = datetime.fromtimestamp(time.time()).strftime("%H:%M:%S")
topic_split = msg.topic.split("/")
# Fix anomaly when someone thinks MQTT is linux and starts the topic with /...
if msg.topic.startswith("/"):
topic_split[0] = "/"
if 1 < len(topic_split):
if topic_split[0] not in self.topic_treeview.get_children(""):
try:
self.topic_treeview.insert("", "end", topic_split[0], text=topic_split[0])
except Exception:
# print(msg.topic, topic_split[0], self.topic_treeview.get_children(""))
raise
for i in range(1, len(topic_split)-1):
parent_topic = "/".join(topic_split[0:i])
topic = "/".join(topic_split[0:i+1])
if topic not in self.topic_treeview.get_children(parent_topic):
self.topic_treeview.insert(parent_topic, "end", topic, text=topic_split[i])
parent_topic = "/".join(topic_split[0:-1])
topic = "/".join(topic_split)
if topic not in self.topic_treeview.get_children(parent_topic):
self.update_individual_topics()
self.topic_treeview.insert(parent_topic,
"end",
"/".join(topic_split),
text=topic_split[-1],
values=(msg.qos,
"RETAINED" if msg.retain == 1 else "",
time_string,
payload_decoded))
else:
self.topic_treeview.set(topic, "qos", msg.qos)
self.topic_treeview.set(topic, "retained", "RETAINED" if msg.retain == 1 else "")
self.topic_treeview.set(topic, "last_message", time_string)
self.topic_treeview.set(topic, "payload", payload_decoded)
except Exception as e:
self.log.exception("Exception inserting new message to treeview",
os.linesep, msg.topic, msg.payload, os.linesep, e, traceback.format_exc())
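    # e.g. an incoming topic "a/b/c" creates tree nodes "a" and "a/b" plus the
    # leaf "a/b/c"; a topic starting with "/" is rooted at a node named "/".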
def on_unsubscribe(self):
try:
self.mqtt_manager.unsubscribe(self.current_subscription)
except Exception as e:
self.log.warning("Failed to unsubscribe", self.current_subscription, "maybe a failed subscription?")
self.current_subscription = None
self.stop_button["state"] = "disabled"
self.browse_button["state"] = "normal"
def flush_messages(self):
self.update_individual_topics(0)
for child in self.topic_treeview.get_children():
self.topic_treeview.delete(child)
def update_individual_topics(self, value=None):
if value is not None:
self.individual_topics = value
else:
self.individual_topics += 1
self.stat_label["text"] = "{} individual topics mapped".format(self.individual_topics)
def copy_topic(self, *args, **kwargs):
try:
topic = self.topic_treeview.selection()[0]
except Exception:
pass
else:
self.root.clipboard_clear()
self.root.clipboard_append(topic)
def copy_payload(self, *args, **kwargs):
try:
selection = self.topic_treeview.selection()[0]
values = self.topic_treeview.item(selection).get("values", [])
payload = values[3]
except Exception:
pass
else:
self.root.clipboard_clear()
self.root.clipboard_append(payload)
def popup(self, event, *args, **kwargs):
try:
self.popup_menu.tk_popup(event.x_root, event.y_root, 0)
except Exception as e:
pass
finally:
self.popup_menu.grab_release()
|
matesh/mqttk
|
mqttk/widgets/topic_browser.py
|
topic_browser.py
|
py
| 11,605 |
python
|
en
|
code
| 27 |
github-code
|
6
|
[
{
"api_name": "tkinter.ttk.Frame",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tkinter.ttk",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Frame",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "tkinter.TOP",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "tkinter.X",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "tkinter.ttk.Combobox",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "tkinter.LEFT",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "tkinter.ttk.Button",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "tkinter.LEFT",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "tkinter.ttk.Button",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "tkinter.LEFT",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "tkinter.ttk.Label",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "tkinter.LEFT",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "tkinter.ttk.Button",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "tkinter.RIGHT",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "tkinter.IntVar",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Checkbutton",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "tkinter.RIGHT",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "tkinter.ttk.Frame",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Treeview",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "tkinter.NO",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "tkinter.NO",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "tkinter.NO",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "tkinter.NO",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "tkinter.NO",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "sys.platform",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "sys.platform",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "sys.platform",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "tkinter.ttk.Scrollbar",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "tkinter.RIGHT",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "tkinter.LEFT",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "tkinter.ttk.Scrollbar",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "tkinter.BOTTOM",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Menu",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "mqttk.constants.CONNECT",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "mqttk.constants.CONNECT",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "mqttk.constants.CONNECT",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "mqttk.constants.COLOURS",
"line_number": 113,
"usage_type": "argument"
},
{
"api_name": "mqttk.constants.COLOURS",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "os.linesep",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "traceback.format_exc",
"line_number": 194,
"usage_type": "call"
}
] |
20172575137
|
import os
import sys
from typing import Optional
from dotenv import load_dotenv
from spinner import Spinner
import actions
import response_parser
import speech
import gpt
message_history = []
GENERAL_DIRECTIONS_PREFIX = """
CONSTRAINTS:
- Cannot run Python code that requires user input.
ACTIONS:
- "TELL_USER": tell the user something. The schema for the action is:
TELL_USER: <TEXT>
- "READ_FILE": read the current state of a file. The schema for the action is:
READ_FILE: <PATH>
- "WRITE_FILE": write a block of text to a file. The schema for the action is:
WRITE_FILE: <PATH>
```
<TEXT>
```
- "RUN_PYTHON": run a Python file. The schema for the action is:
RUN_PYTHON: <PATH>
- "SEARCH_ONLINE": search online and get back a list of URLs relevant to the query. The schema for the action is:
SEARCH_ONLINE: <QUERY>
- EXTRACT_INFO: extract specific information from a webpage. The schema for the action is:
EXTRACT_INFO: <URL>, <a brief instruction to GPT for information to extract>
- "SHUTDOWN": shut down the program. The schema for the action is:
SHUTDOWN
RESOURCES:
1. File contents after reading file.
2. Online search results returning URLs.
3. Output of running a Python file.
PERFORMANCE EVALUATION:
1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.
2. Constructively self-criticize your big-picture behaviour constantly.
3. Reflect on past decisions and strategies to refine your approach.
4. Every action has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.
Write only one action. The action must be one of the actions specified above and must be written according to the schema specified above.
After the action, write a JSON object (parseable by Python's json.loads()) which must contain the following keys:
- "reason": a short sentence explaining the action above
- "plan": a short high-level plan in plain English
"""
FLAG_VERBOSE = "--verbose"
FLAG_SPEECH = "--speech"
FLAG_CONTINUOUS = "--continuous"
def main():
general_directions = GENERAL_DIRECTIONS_PREFIX
if FLAG_SPEECH in sys.argv[1:]:
general_directions += '- "speak": a short summary of thoughts to say to the user'
general_directions += "\n\n"
general_directions += "If you want to run an action that is not in the above list of actions, send the SHUTDOWN action instead and explain in 'reason' which action you wanted to run.\n"
general_directions += "So, write one action and one metadata JSON object, nothing else."
load_dotenv()
os.makedirs("workspace", exist_ok=True)
os.chdir("workspace")
new_plan: Optional[str] = None
user_directions = input("What would you like me to do:\n")
while True:
print("========================")
with Spinner("Thinking..."):
assistant_response = gpt.chat(user_directions, general_directions, new_plan, message_history)
if FLAG_VERBOSE in sys.argv[1:]:
print(f"ASSISTANT RESPONSE: {assistant_response}")
action, metadata = response_parser.parse(assistant_response)
print(f"ACTION: {action.short_string()}")
if FLAG_SPEECH in sys.argv[1:] and metadata.speak is not None:
speech.say_async(metadata.speak)
if isinstance(action, actions.ShutdownAction):
print("Shutting down...")
break
else:
print(f"REASON: {metadata.reason}")
print(f"PLAN: {metadata.plan}")
if FLAG_CONTINUOUS not in sys.argv[1:]:
run_action = input("Run the action? [Y/n]")
if run_action.lower() != "y" and run_action != "":
break
action_output = action.run()
message_content = f"Action {action.key()} returned:\n{action_output}"
message_history.append({"role": "system", "content": message_content})
change_plan = input("Change the proposed plan? [N/y]")
if change_plan.lower() == "y":
new_plan = input("What would you like me to change the plan to? ")
else:
new_plan = None
if __name__ == "__main__":
main()
|
rokstrnisa/RoboGPT
|
robogpt/main.py
|
main.py
|
py
| 4,135 |
python
|
en
|
code
| 264 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "spinner.Spinner",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "gpt.chat",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "response_parser.parse",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "speech.say_async",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "actions.ShutdownAction",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 107,
"usage_type": "attribute"
}
] |
11000393367
|
import typing as T
from datetime import datetime, timedelta
from pydantic import BaseModel
from mirai import (
Mirai, Member, Friend,
MessageChain, At
)
from .alias import MESSAGE_T
# https://mirai-py.originpages.com/tutorial/annotations.html
Sender = T.Union[Member, Friend]
Type = str
def reply(app: Mirai, sender: "Sender", event_type: "Type"):
"""app_reply = reply(app, sender, event_type)
app_reply(message)
"""
async def wrapper(message: MESSAGE_T, *, at_sender: bool = False):
if at_sender:
if isinstance(message, list):
message.insert(0, At(sender.id))
elif isinstance(message, MessageChain):
message.__root__.insert(0, At(sender.id))
else:
raise TypeError(f"not supported type for reply: {message.__class__.__name__}")
if event_type == "GroupMessage":
await app.sendGroupMessage(sender.group, message)
elif event_type == "FriendMessage":
await app.sendFriendMessage(sender, message)
else:
raise ValueError("Not supported event type")
return wrapper
def at_me(app: Mirai, message: MessageChain):
at: T.Optional[At] = message.getFirstComponent(At)
if at:
return at.target == app.qq
else:
return False
class CoolDown(BaseModel):
"""example:
cd = CoolDown(app='app1', td=20)
cd.update(123)
cd.check(123)
"""
app: str
td: float # timedelta
value: T.Dict[int, datetime] = {}
def update(self, mid: int) -> None:
self.value.update({mid: datetime.now()})
def check(self, mid: int) -> bool:
ret = datetime.now() >= self.value.get(mid, datetime.utcfromtimestamp(0)) + timedelta(seconds=self.td)
return ret
def shuzi2number(shuzi: T.Optional[str]) -> int:
s = {'一': 1, '两': 2, '二': 2, '三': 3,
'四': 4, '五': 5, '六': 6, '七': 7,
'八': 8, '九': 9, '十': 10}
if not shuzi:
return 1
elif shuzi.isdecimal():
return int(shuzi)
elif shuzi in s.keys():
return s[shuzi]
else:
return 1
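# e.g. shuzi2number("三") -> 3, shuzi2number("12") -> 12, shuzi2number(None) -> 1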
|
Lycreal/MiraiBot
|
plugins/_utils/__init__.py
|
__init__.py
|
py
| 2,146 |
python
|
en
|
code
| 70 |
github-code
|
6
|
[
{
"api_name": "typing.Union",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "mirai.Member",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "mirai.Friend",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "mirai.Mirai",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "alias.MESSAGE_T",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "mirai.At",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "mirai.MessageChain",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "mirai.At",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "mirai.Mirai",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "mirai.MessageChain",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "mirai.At",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 65,
"usage_type": "attribute"
}
] |
74285386747
|
import sys
sys.path.append('../python')
sys.path.append('../apps')
import needle as ndl
from d2l import torch as d2l
import torch
import torch.nn as nn
import numpy as np
class MultiHeadAttention(nn.Module):
"""多头注意力"""
def __init__(self, key_size, query_size, value_size, num_hiddens,
num_heads, dropout, bias=False, **kwargs):
super(MultiHeadAttention, self).__init__(**kwargs)
self.num_heads = num_heads
self.attention = d2l.DotProductAttention(dropout)
self.W_q = nn.Linear(query_size, num_hiddens, bias=bias)
self.W_k = nn.Linear(key_size, num_hiddens, bias=bias)
self.W_v = nn.Linear(value_size, num_hiddens, bias=bias)
self.W_o = nn.Linear(num_hiddens, num_hiddens, bias=bias)
torch.nn.init.kaiming_uniform_(self.W_q.weight)
torch.nn.init.kaiming_uniform_(self.W_k.weight)
torch.nn.init.kaiming_uniform_(self.W_v.weight)
torch.nn.init.kaiming_uniform_(self.W_o.weight)
### test
self.X1 = None
self.X2 = None
self.X3 = None
self.output = None
self.vl = None
def forward(self, queries, keys, values, valid_lens):
        # Shapes of queries, keys, values:
        # (batch_size, no. of queries or key-value pairs, num_hiddens)
        # Shape of valid_lens:
        # (batch_size,) or (batch_size, no. of queries)
        # After the transformation, the output shapes of queries, keys, values:
        # (batch_size*num_heads, no. of queries or key-value pairs,
        # num_hiddens/num_heads)
queries = self.transpose_qkv(self.W_q(queries), self.num_heads)
keys = self.transpose_qkv(self.W_k(keys), self.num_heads)
values = self.transpose_qkv(self.W_v(values), self.num_heads)
if valid_lens is not None:
            # On axis 0, copy the first item (scalar or vector) num_heads
            # times, then copy the second item, and so on.
valid_lens = torch.repeat_interleave(
valid_lens, repeats=self.num_heads, dim=0)
self.vl = valid_lens
        # Shape of output: (batch_size*num_heads, no. of queries,
        # num_hiddens/num_heads)
output = self.attention(queries, keys, values, valid_lens)
self.output = output
        # Shape of output_concat: (batch_size, no. of queries, num_hiddens)
output_concat = self.transpose_output(output, self.num_heads)
return self.W_o(output_concat)
def transpose_qkv(self, X, num_heads):
"""为了多注意力头的并行计算而变换形状"""
# 输入X的形状:(batch_size,查询或者“键-值”对的个数,num_hiddens)
# 输出X的形状:(batch_size,查询或者“键-值”对的个数,num_heads,
# num_hiddens/num_heads)
X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)
self.X1 = X.detach().numpy()
        # Shape of output X: (batch_size, num_heads, no. of queries or key-value pairs,
        # num_hiddens/num_heads)
X = X.permute(0, 2, 1, 3)
self.X2 = X.detach().numpy()
        # Shape of the final output: (batch_size*num_heads, no. of queries or key-value pairs,
        # num_hiddens/num_heads)
X3 = X.reshape(-1, X.shape[2], X.shape[3])
self.X3 = X3.detach().numpy()
return X3
def transpose_output(self, X, num_heads):
"""逆转transpose_qkv函数的操作"""
X = X.reshape(-1, num_heads, X.shape[1], X.shape[2])
X = X.permute(0, 2, 1, 3)
return X.reshape(X.shape[0], X.shape[1], -1)
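# Shape walk-through for the test below (batch_size=2, num_queries=4,
# num_heads=5, num_hiddens=100): W_q(X) -> (2, 4, 100); transpose_qkv
# -> (10, 4, 20); attention output -> (10, 4, 20); transpose_output -> (2, 4, 100).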
num_hiddens, num_heads = 100, 5
batch_size, num_queries = 2, 4
num_kvpairs = 6
valid_lens = torch.tensor([3, 2])
# valid_lens = None
X = torch.randn((batch_size, num_queries, num_hiddens),dtype=torch.float32)
Y = torch.randn((batch_size, num_kvpairs, num_hiddens),dtype=torch.float32)
# d2l.check_shape(attention(X, Y, Y, valid_lens),
# (batch_size, num_queries, num_hiddens))
dropout = 0
attention_ = ndl.nn.MultiHeadAttention(num_hiddens, num_hiddens, num_hiddens,
num_hiddens, num_heads, dropout, device=ndl.cpu(), dtype="float32")
valid_lens_ = valid_lens.detach().numpy() if valid_lens is not None else None
X_ = ndl.Tensor(X.detach().numpy(), device=ndl.cpu(), dtype="float32")
Y_ = ndl.Tensor(Y.detach().numpy(), device=ndl.cpu(), dtype="float32")
attention = MultiHeadAttention(num_hiddens, num_hiddens, num_hiddens,
num_hiddens, num_heads, dropout)
attention.W_q.weight = torch.nn.Parameter(torch.tensor(attention_.W_q.weight.numpy().T, dtype=torch.float32))
attention.W_k.weight = torch.nn.Parameter(torch.tensor(attention_.W_k.weight.numpy().T, dtype=torch.float32))
attention.W_v.weight = torch.nn.Parameter(torch.tensor(attention_.W_v.weight.numpy().T, dtype=torch.float32))
attention.W_o.weight = torch.nn.Parameter(torch.tensor(attention_.W_o.weight.numpy().T, dtype=torch.float32))
print("W_q.weight:", np.linalg.norm(attention.W_q.weight.T.detach().numpy()-attention_.W_q.weight.numpy()))
print("W_k.weight:", np.linalg.norm(attention.W_k.weight.T.detach().numpy()-attention_.W_k.weight.numpy()))
print("W_v.weight:", np.linalg.norm(attention.W_v.weight.T.detach().numpy()-attention_.W_v.weight.numpy()))
print("W_o.weight:", np.linalg.norm(attention.W_o.weight.T.detach().numpy()-attention_.W_o.weight.numpy()))
print("X:", np.linalg.norm(X.detach().numpy()-X_.numpy()))
queries = attention.transpose_qkv(attention.W_q(X), attention.num_heads)
queries_ = attention_.transpose_qkv(attention_.W_q(X_))
zq = attention.W_q(X).detach().numpy()
zq_ = attention_.W_q(X_).numpy()
print("W_q.weight:", np.linalg.norm(attention.W_q.weight.T.detach().numpy() - attention_.W_q.weight.numpy()))
print("W_q(X):", np.linalg.norm(zq - zq_))
X1 = X.reshape((X.shape[0], X.shape[1], attention.num_heads, -1))
X1_ = X_.reshape((X_.shape[0], X_.shape[1], attention_.num_heads, -1))
print("X1-X1_:", np.linalg.norm(X1.detach().numpy() - X1_.numpy()))
# print("X1.shape", attention.X1.shape)
# print("X1_.shape", attention_.X1.shape)
# print("X2.shape", attention.X2.shape)
# print("X2_.shape", attention_.X2.shape)
# print("X3.shape", attention.X3.shape)
# print("X3_.shape", attention_.X3.shape)
# print("X1:", np.linalg.norm(attention.X1-attention_.X1))
# print("X2:", np.linalg.norm(attention.X2-attention_.X2))
# print("X3:", np.linalg.norm(attention.X3-attention_.X3))
keys = attention.transpose_qkv(attention.W_k(Y), attention.num_heads)
keys_ = attention_.transpose_qkv(attention_.W_k(Y_))
# print("X1:", np.linalg.norm(attention.X1-attention_.X1))
# print("X2:", np.linalg.norm(attention.X2-attention_.X2))
# print("X3:", np.linalg.norm(attention.X3-attention_.X3))
values = attention.transpose_qkv(attention.W_v(Y), attention.num_heads)
values_ = attention_.transpose_qkv(attention_.W_v(Y_))
# print("X1:", np.linalg.norm(attention.X1-attention_.X1))
# print("X2:", np.linalg.norm(attention.X2-attention_.X2))
# print("X3:", np.linalg.norm(attention.X3-attention_.X3))
print(np.linalg.norm(X.detach().numpy()-X_.numpy()))
print(np.linalg.norm(Y.detach().numpy()-Y_.numpy()))
print(np.linalg.norm(queries.detach().numpy()-queries_.numpy()))
print(np.linalg.norm(keys.detach().numpy()-keys_.numpy()))
print(np.linalg.norm(values.detach().numpy()-values_.numpy()))
attention.eval()
y = attention(X, Y, Y, valid_lens)
print("attn_output.shape:", y.shape)
y_ = attention_(X_, Y_, Y_, valid_lens_)
print("attn_output_.shape:", y_.shape)
if (valid_lens is not None):
print("valid_lens:", np.linalg.norm(attention.vl.detach().numpy()-attention_.vl))
print("output:", np.linalg.norm(attention.output.detach().numpy()-attention_.output.numpy()))
print("attn_output:", np.linalg.norm(y.detach().numpy()-y_.numpy()))
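# Hedged standalone check (not part of the original test): verify that the
# head-splitting reshape used by transpose_qkv and the merge in transpose_output
# are exact inverses. Only torch is assumed; the sizes mirror the test above.
_Xc = torch.randn(2, 4, 100)  # (batch_size, num_queries, num_hiddens)
_heads = 5
_split = _Xc.reshape(2, 4, _heads, -1).permute(0, 2, 1, 3).reshape(-1, 4, 100 // _heads)
_merged = _split.reshape(-1, _heads, 4, 100 // _heads).permute(0, 2, 1, 3).reshape(2, 4, 100)
assert torch.allclose(_Xc, _merged)  # round trip through split/merge is lossless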
|
Erostrate9/needle
|
tests/MultiHeadAttention.py
|
MultiHeadAttention.py
|
py
| 7,843 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "d2l.torch.DotProductAttention",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "d2l.torch",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.kaiming_uniform_",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.kaiming_uniform_",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.kaiming_uniform_",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.kaiming_uniform_",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.repeat_interleave",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "torch.randn",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "needle.nn.MultiHeadAttention",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "needle.nn",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "needle.cpu",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "needle.Tensor",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "needle.cpu",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "needle.Tensor",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "needle.cpu",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 169,
"usage_type": "attribute"
}
] |
1824122729
|
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from book.models import BookInfo
# Create your views here.
################################# Request #######################################################################################################################################
def create_book(request):
book = BookInfo.objects.create(
name='abc',
pub_date='2022-5-3',
readcount=10
)
return HttpResponse('create')
def shop(request, city_id, shop_id):
print(city_id,shop_id)
query_params = request.GET
print(query_params)
# order = query_params['order']
# order = query_params.get('order')
# <QueryDict: {'order': ['readcount'], 'page': ['1']}>
# QueryDict behaves like a dict,
# and additionally supports multiple values per key:
# <QueryDict: {'order': ['readcount', 'commentcount'], 'page': ['1']}>
order = query_params.getlist('order')
print(order)
return HttpResponse('python_django learning')
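# Illustrative sketch (not in the original file; standard library only): the
# same one-key-many-values parsing that Django's QueryDict performs.
# from urllib.parse import parse_qs
# parse_qs('order=readcount&order=commentcount&page=1')
# -> {'order': ['readcount', 'commentcount'], 'page': ['1']}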
def register(request):
data = request.POST
print(data)
# < QueryDict: {'username': ['xixi'], 'password': ['123']} >
return HttpResponse('Register')
def json(request):
body = request.body
# print(body)
# b'{\n\t"name":"xixi",\n\t"age": 28\n}'
body_str = body.decode()
# print(body_str)
"""
{
"name":"xixi",
"age": 28
}
<class 'str'>
"""
# print(type(body_str))
# A JSON-formatted string can be converted to a Python dict
import json
body_dict = json.loads(body_str)
print(body_dict)
# {'name': 'xixi', 'age': 28}
############## Request headers ############
# print(request.META)
print(request.META['SERVER_PROTOCOL'])
return HttpResponse('json')
def method(request):
print(request.method)
return HttpResponse('method')
def mobilePhone(request, phone_number):
print(phone_number)
return HttpResponse('mobilePhone')
################################### Response #################################################
def response(request):
# HttpResponse(content=response body, content_type=response body data type, status=status code)
# response = HttpResponse('res', status=200)
#
# response['name'] = 'xixi'
#
# return response
# JSON -> dict
# dict -> JSON
info = {
'name': 'xixi',
'age': 28
}
info_list = [
{
'name': 'xixi',
'age': 28
},
{
'name': 'erxi',
'age': 28
}
]
# response = JsonResponse(info)
response = JsonResponse(info_list, safe=False)
# response = JsonResponse(data=info_list, safe=False)
# [{"name": "xixi", "age": 28}, {"name": "erxi", "age": 28}]
return response
# return redirect('http://www.baidu.com')
# import json
# data=json.dumps(info_list)
#
# response = HttpResponse(data)
# return response
# 1xx
# 2xx
# 200 success
# 3xx
# 4xx problem with the request
# 404 page not found; routing problem
# 403 forbidden; permission problem
# 5xx
# HTTP status code must be an integer from 100 to 599
#####################
"""
Query string
http://ip:port/path/path/?key=value&key1=value1
The URL splits at the '?' into two parts:
before the '?' is the request path;
after the '?' is the query string, which works like a dict: key=value pairs joined by '&'
"""
########################### cookies and sessions ##############################################################################
"""
First request, carrying a query string:
http://127.0.0.1:8000/set_cookie/?username=zhangsan&password=123
After receiving the request, the server reads username and sets cookie data that includes it.
After receiving the server's response, the browser should store the cookie.
On the second and later requests to http://127.0.0.1:8000, the cookie is sent along, so the server can read it to identify the user.
"""
def set_cookie(request):
# Set cookies: the server sets them on the response
# 1. Read the query-string data
username = request.GET.get('username')
pwd = request.GET.get('pwd')
# 2. The server sets the cookie
response = HttpResponse('set_cookie')
# key, value; max_age is the expiry time in seconds
response.set_cookie('name', username, max_age=3600) # valid for one hour
response.set_cookie('pwd', pwd) # session-only (temporary) cookie
# Delete a cookie
response.delete_cookie('pwd')
return response
def get_cookie(request):
# Read cookies from the request
print(request.COOKIES)
# request.COOKIES is a dict
name = request.COOKIES.get('name')
return HttpResponse(name)
################## session #####################
# Sessions are stored on the server side -- the data is relatively safe
# Sessions depend on cookies
"""
First request: http://127.0.0.1:8000/set_session/?username=zhangsan . We store session data on the server,
and the server generates a sessionid cookie at the same time.
The browser receives and stores that cookie.
The second and later requests all carry this sessionid. The server validates it;
if it checks out, the server reads the related data and runs the business logic.
"""
def set_session(request):
# 1. Simulate fetching the user info
username = request.GET.get('username')
# 2. Set the session data
user_id = 1
request.session['user_id'] = user_id
request.session['username'] = username
# Deleting session data:
# request.session.clear() removes all session values
# request.session.clear()
# request.session.flush() removes all session keys & values
# request.session.flush()
# del request.session['key'] removes the value for a given session key
# del request.session['48e4r7tydk1z8zs6rbvxk0ox1ti14zh2']
# request.session.set_expiry(10)
return HttpResponse('set_session')
def get_session(request):
# Getting the dict value by index key raises an exception when the session is missing/mismatched; not recommended
# user_id = request.session['user_id']
# username = request.session['username']
user_id = request.session.get('user_id')
username = request.session.get('username')
content = '{},{}'.format(user_id,username)
return HttpResponse(content)
############################### Class-based views ###################################
def login(request):
print(request.method)
if request.method == 'GET':
return HttpResponse('GET request')
else:
return HttpResponse('POST request')
"""
Defining a class-based view:
class ViewName(View):
def get(self, request):
return HttpResponse('xxx')
def http_method_lower(self, request):
return HttpResponse('xxx')
1. Inherit from View
2. Methods on a class view use lowercase HTTP method names to distinguish request types
"""
from django.views import View
class LoginView(View):
def get(self, request):
return HttpResponse('GET handling logic')
def post(self, request):
return HttpResponse('POST handling logic')
"""
My-orders and profile pages:
logged-in users may visit them;
users who are not logged in should not, and should be redirected to the login page.
Define an orders / profile class view.
How do we know whether the user is logged in??? Take logging into the admin site as the example.
"""
from django.contrib.auth.mixins import LoginRequiredMixin
# class OrderView(View): # inherit only from View
# class OrderView(View, LoginRequiredMixin):
# multiple inheritance from LoginRequiredMixin and View; the order matters
class OrderView(LoginRequiredMixin, View): # multiple inheritance: LoginRequiredMixin and View
def get(self, request):
# simulated login flag
# isLogin = True
# if not isLogin:
# return HttpResponse('Not logged in, redirecting to the login page')
return HttpResponse('GET my-orders page; this page requires login')
def post(self, request):
isLogin = True
# if not isLogin:
# return HttpResponse('Not logged in, redirecting to the login page')
return HttpResponse('POST my-orders page; this page requires login')
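# Hedged wiring sketch (urls.py is assumed, not part of this file): function
# views are routed directly, class views via as_view().
# from django.urls import path
# urlpatterns = [
#     path('login/', LoginView.as_view()),
#     path('order/', OrderView.as_view()),
#     path('set_cookie/', set_cookie),
#     path('get_cookie/', get_cookie),
# ]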
|
guoxi-xixi/django_base
|
bookmanager03/book/views.py
|
views.py
|
py
| 8,205 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "book.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "book.models.BookInfo.objects.create",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "book.models.BookInfo.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "book.models.BookInfo",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "django.views.View",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 298,
"usage_type": "call"
}
] |
74280782907
|
import pandas as pd
import numpy as np
from xgboost import XGBClassifier
from metrics import macro_f1
import settings
import pickle
import gc
import time
class BCXGBTrainer:
def __init__(self, config, logger):
self.config = config
self.model_params = config['model_params']
self.training_params = config['training_params']
self.logger = logger
def train_and_validate(self, df):
self.logger.info('Run training !')
self.logger.info(f'config : {self.config}')
xgb_oof = np.zeros((df.shape[0],))
xgb_oof_score = []
xgb_importances = pd.DataFrame()
model_save_dir = settings.MODEL / self.model_params['model_save_dir']
model_save_dir.mkdir(parents=True, exist_ok=True)
tabular_features = self.config['tabular_features']
target = self.training_params['target']
X = df[tabular_features]
y = df[target]
model = XGBClassifier(**self.training_params['best_params'])
for fold in range(self.config['n_folds']):
self.logger.info(f'Fold {fold} training ...')
start_time = time.time()
train_idx, valid_idx = df.loc[df['fold'] !=
fold].index, df.loc[df['fold'] == fold].index
X_train, X_valid = X.iloc[train_idx], X.iloc[valid_idx]
y_train, y_valid = y.iloc[train_idx], y.iloc[valid_idx]
model.fit(X_train, y_train,
eval_set=[(X_valid, y_valid)],
**self.training_params['fit_params'])
fi_tmp = pd.DataFrame()
fi_tmp['feature'] = X_train.columns
fi_tmp['importance'] = model.feature_importances_
fi_tmp['fold'] = fold
fi_tmp['seed'] = self.config['seed']
# DataFrame.append was removed in pandas 2.0; concat is the idiomatic replacement
xgb_importances = pd.concat([xgb_importances, fi_tmp], ignore_index=True)
xgb_oof[valid_idx] = model.predict(X_valid)
score = macro_f1(y.iloc[valid_idx], xgb_oof[valid_idx])
xgb_oof_score.append(score)
model_save_path = model_save_dir / f'model_f{fold}_best.pkl'
with open(model_save_path, 'wb') as f:
pickle.dump(model, f, pickle.HIGHEST_PROTOCOL)
elapsed = time.time() - start_time
self.logger.info(
f'[Fold {fold}] valid_macro_f1 : {score:.6f} | time : {elapsed:.0f}s')
self.logger.info(
f"[Fold {fold}] best model saved : {model_save_path}")
self.logger.info('-'*100)
self.logger.info(
f'Average best valid_macro_F1 Score: {np.mean(xgb_oof_score):.6f}')
del model
gc.collect()
def inference(self, df_test):
xgb_preds = np.zeros((df_test.shape[0], ))
tabular_features = self.config['tabular_features']
X_test = df_test[tabular_features]
for fold in range(self.config['n_folds']):
start_time = time.time()
model_save_path = settings.MODEL / \
self.model_params['model_save_dir'] / f'model_f{fold}_best.pkl'
model = pickle.load(open(model_save_path, 'rb'))
xgb_preds += model.predict_proba(X_test)[:, 1] / \
self.config['n_folds']
elapsed = time.time() - start_time
self.logger.info(
f'[model_f{fold}_best] inference time : {elapsed:.0f}s')
del model
gc.collect()
xgb_preds = np.expand_dims(xgb_preds, axis=1)
preds_save_path = settings.MODEL / \
self.model_params['model_save_dir'] / f'preds.npy'
np.save(preds_save_path, xgb_preds)
self.logger.info(
f'Prediction result saved : {preds_save_path}')
def save_oof(self, df):
xgb_oof = np.zeros((df.shape[0], ))
xgb_oof_score = []
tabular_features = self.config['tabular_features']
target = self.training_params['target']
X = df[tabular_features]
y = df[target]
for fold in range(self.config['n_folds']):
start_time = time.time()
model_save_path = settings.MODEL / \
self.model_params['model_save_dir'] / f'model_f{fold}_best.pkl'
model = pickle.load(open(model_save_path, 'rb'))
valid_idx = df.loc[df['fold'] == fold].index
X_valid = X.iloc[valid_idx]
xgb_oof[valid_idx] = model.predict_proba(X_valid)[:, 1]
score = macro_f1(y.iloc[valid_idx], np.where(
xgb_oof[valid_idx] > 0.5, 1, 0))
xgb_oof_score.append(score)
elapsed = time.time() - start_time
self.logger.info(
f'[model_f{fold}_best] valid_macro_f1 : {score:.6f} | time : {elapsed:.0f}s')
del model
gc.collect()
xgb_oof = np.expand_dims(xgb_oof, axis=1)
oof_save_path = settings.MODEL / \
self.model_params['model_save_dir'] / f'oof.npy'
np.save(oof_save_path, xgb_oof)
self.logger.info(
f'Validation result saved : {oof_save_path}')
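# Hedged usage sketch (not in the original file): the trainer assumes df carries
# a 'fold' column. One plausible way to produce it upstream, reusing this file's
# config names:
# from sklearn.model_selection import StratifiedKFold
# skf = StratifiedKFold(n_splits=config['n_folds'], shuffle=True, random_state=config['seed'])
# for fold, (_, valid_idx) in enumerate(skf.split(df, df[config['training_params']['target']])):
#     df.loc[valid_idx, 'fold'] = fold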
|
lim-hyo-jeong/DACON-Breast-Cancer
|
xgb_trainer.py
|
xgb_trainer.py
|
py
| 5,105 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "settings.MODEL",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "xgboost.XGBClassifier",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "metrics.macro_f1",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "settings.MODEL",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "settings.MODEL",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "numpy.save",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "settings.MODEL",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "metrics.macro_f1",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "settings.MODEL",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "numpy.save",
"line_number": 141,
"usage_type": "call"
}
] |
10678150202
|
import asyncio
from typing import List, Any, Set, Dict
import orjson
import websockets
from websockets import WebSocketServerProtocol
from blockchain import Blockchain
from block import Block
from transaction import Transaction
from utils import send, handle
class WsNode:
def __init__(self, domain: str):
self.domain: str = domain
self.nodes: Set[str] = set()
self.connects: Dict[str, WebSocketServerProtocol] = dict()
self.blockchain: Blockchain = Blockchain()
self.mem_pool: Set[Transaction] = set()
async def serve(self, node: str):
ws = self.connects[node]
while True:
try:
await self.handle(ws, orjson.loads(await ws.recv()))
except websockets.ConnectionClosed:
self.nodes.remove(node)
self.connects.pop(node)
break
async def handle(self, ws, message):
switcher = {
'blockchain_len': self.handle_blockchain_len,
'blockchain': self.handle_blockchain,
'hashes': self.handle_hashes,
}
await handle(switcher, ws, message)
async def broadcast(self, _type: str, data: Any = None, nodes: List[str] = None) -> None:
sockets = self.connects.values() if nodes is None else [self.connects[node] for node in nodes]
await asyncio.gather(*[send(ws, _type, data) for ws in sockets])
async def connect_nodes(self, nodes: List[str]):
olds = [self.domain] + self.node_list
news = []
for node in filter(lambda x: x not in olds, nodes):
news.append(node)
websocket = await websockets.connect(f'ws://{node}')
asyncio.get_event_loop().create_task(self.serve(node))
self.nodes.add(node)
self.connects[node] = websocket
inputs = [(node, olds + news) for node in news] + [(node, news) for node in olds]
if len(news) > 1 or (len(news) > 0 and self.domain not in news):
await asyncio.gather(*[self.share_nodes(*args) for args in inputs])
await self.pull_longest_chain(news)
async def share_nodes(self, node: str, nodes: List[str]):
print('share', nodes, 'to', node)
if node != self.domain:
ws = self.connects[node]
await send(ws, 'connect_nodes', {'nodes': nodes})
async def share_block(self, block: Block):
await self.broadcast('add_block', {'block': block.dict()})
async def pull_longest_chain(self, nodes: List[str] = None):
await self.broadcast('get_blockchain_len', nodes=nodes)
async def add_transaction(self, transaction: Transaction):
if transaction in self.mem_pool:
return
self.mem_pool.add(transaction)
await self.broadcast('add_transaction', {'transaction': transaction.dict()})
@property
def blockchain_len(self) -> int:
return len(self.blockchain)
@property
def node_list(self) -> List[str]:
return list(self.nodes)
@property
def mem_pool_list(self) -> List[Transaction]:
return list(self.mem_pool)
async def handle_blockchain_len(self, length: int) -> str:
if length > self.blockchain_len:
return 'get_blockchain_hashes'
async def handle_hashes(self, hashes: List[str]):
start = 0
for i, (a, b) in enumerate(zip(hashes, self.blockchain.hashes)):
if a != b:
start = i
break
return 'get_blockchain', {'start': start}
async def handle_blockchain(self, chain):
if chain[-1]['id'] > self.blockchain_len:
self.blockchain.blocks[chain[0]['id']:] = [Block.parse_obj(block_data['block']) for block_data in chain]
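# Hedged usage sketch (not in the original file): a node typically runs
# websockets.serve(...) with a handler that forwards to WsNode.handle, then
# dials its peers. Addresses below are placeholders.
# node = WsNode('127.0.0.1:8000')
# loop = asyncio.get_event_loop()
# loop.run_until_complete(node.connect_nodes(['127.0.0.1:8001']))
# loop.run_forever()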
|
XmasApple/simple_blockchain
|
ws_node.py
|
ws_node.py
|
py
| 3,756 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.Set",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "websockets.WebSocketServerProtocol",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "blockchain.Blockchain",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "transaction.Transaction",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "orjson.loads",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "websockets.ConnectionClosed",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "utils.handle",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "asyncio.gather",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "utils.send",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "websockets.connect",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "asyncio.gather",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "utils.send",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "block.Block",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "block.dict",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "transaction.Transaction",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "transaction.dict",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "transaction.Transaction",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "block.Block.parse_obj",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "block.Block",
"line_number": 102,
"usage_type": "name"
}
] |
25159533855
|
import psycopg2
import random
con=psycopg2.connect('dbname=ecommerce_db user=postgres port=5432 host=localhost password=Murad2004')
cur=con.cursor()
def show(cursor):
cur.execute(query)
length = 30
print(*[desc[0].ljust(30) for desc in cursor.description], sep='')
print('-'*140)
result = cur.fetchall()
for row in result:
for col in row:
print(str(col).ljust(length)[:37], end='')
print()
# query="""
# CREATE TABLE seller(
# id SERIAL PRIMARY KEY,
# name VARCHAR(50)
# );
# CREATE TABLE product(
# id SERIAL PRIMARY KEY,
# title VARCHAR(50) NOT NULL,
# price NUMERIC NOT NULL,
# seller_id INT,
# CONSTRAINT fk_seller
# FOREIGN KEY(seller_id)
# REFERENCES seller(id)
# ON DELETE CASCADE
# );
# CREATE TABLE tag(
# id SERIAL PRIMARY KEY,
# title VARCHAR(50) NOT NULL
# );
# CREATE TABLE customer(
# id SERIAL PRIMARY KEY,
# name VARCHAR(50)
# );
# CREATE TABLE wishlist(
# id SERIAL PRIMARY KEY,
# customer_id INT,
# CONSTRAINT fk_customer
# FOREIGN KEY(customer_id)
# REFERENCES customer(id)
# ON DELETE CASCADE
# );
# CREATE TABLE wishlist_products(
# id SERIAL PRIMARY KEY,
# product_id INT,
# customer_id INT,
# CONSTRAINT fk_customer
# FOREIGN KEY(customer_id)
# REFERENCES customer(id)
# ON DELETE CASCADE,
# CONSTRAINT fk_product
# FOREIGN KEY(product_id)
# REFERENCES product(id)
# ON DELETE CASCADE
# );
# CREATE TABLE review(
# id SERIAL PRIMARY KEY,
# rate NUMERIC,
# customer_id INT,
# product_id INT,
# CONSTRAINT fk_customer
# FOREIGN KEY(customer_id)
# REFERENCES customer(id)
# ON DELETE SET NULL,
# CONSTRAINT fk_product
# FOREIGN KEY(product_id)
# REFERENCES product(id)
# ON DELETE CASCADE
# );
# CREATE TABLE product_tags(
# id SERIAL PRIMARY KEY,
# product_id INT,
# tag_id INT,
# CONSTRAINT fk_product_tag
# FOREIGN KEY(product_id)
# REFERENCES product(id)
# ON DELETE CASCADE,
# CONSTRAINT fk_tag_product
# FOREIGN KEY(tag_id)
# REFERENCES tag(id)
# ON DELETE CASCADE
# );
# """
customer_data=[{
"name": "Halette Milberry"
}, {
"name": "Barby Wastell"
}, {
"name": "Lexie Dragon"
}, {
"name": "Rosamond Kynston"
}, {
"name": "Christen Keyson"
}, {
"name": "Madeline Knottley"
}, {
"name": "Ruby Loachhead"
}, {
"name": "Aeriel Knowlden"
}, {
"name": "Hedy Phillipp"
}, {
"name": "Harmonia Freckelton"
}, {
"name": "Rossy Mustchin"
}, {
"name": "Dulcie Higgonet"
}, {
"name": "Kala Caldroni"
}, {
"name": "Nessie Lavery"
}, {
"name": "Shanta Polotti"
}, {
"name": "Berty Dampier"
}, {
"name": "Frans Fosdike"
}, {
"name": "Lotty Corkhill"
}, {
"name": "Randie Lawther"
}, {
"name": "Husain Reye"
}, {
"name": "Fayre McPhillimey"
}, {
"name": "Susette Raitie"
}, {
"name": "Sela Elsmore"
}, {
"name": "Taddeo Enterlein"
}, {
"name": "Valma Hutchence"
}, {
"name": "Micki Gorelli"
}, {
"name": "Arabelle Najera"
}, {
"name": "Annemarie Crenage"
}, {
"name": "Nara Whight"
}, {
"name": "Borg Downage"
}, {
"name": "Sheri Moreman"
}, {
"name": "Hew Dignum"
}, {
"name": "Jacquenette Caygill"
}, {
"name": "Margot Cradduck"
}, {
"name": "Adele Snassell"
}, {
"name": "Caryl Pevsner"
}, {
"name": "Gannon Northrop"
}, {
"name": "Artemas Goodlip"
}, {
"name": "Lawrence Crockatt"
}, {
"name": "Sheelagh Cosely"
}, {
"name": "Doralyn Tripett"
}, {
"name": "Grove Learman"
}, {
"name": "Rosanna Pretious"
}, {
"name": "Earle Sapshed"
}, {
"name": "Guido Onyon"
}, {
"name": "Rolfe Panner"
}, {
"name": "Hilly Dashwood"
}, {
"name": "Orland Shutt"
}, {
"name": "Kipp Blacksell"
}, {
"name": "Umberto Chaman"
}]
# query="""
# INSERT INTO customer(name) VALUES(%s);
# """
# for i in customer_data:
# cur.execute(query,(i['name'],))
# query="SELECT * FROM customer"
seller_data=[
{
"name": "Si Friary"
}, {
"name": "Scotty Ludlem"
}, {
"name": "Randa Ifill"
}, {
"name": "Vanessa Fay"
}, {
"name": "Tamarra Tossell"
}, {
"name": "Kennett Dumper"
}, {
"name": "Jessika Stienham"
}, {
"name": "Perry Branscombe"
}, {
"name": "Salaidh Schultz"
}, {
"name": "Nicolis Stonman"
}, {
"name": "Michale Brecknock"
}, {
"name": "Marian Withinshaw"
}, {
"name": "Lynea Benit"
}, {
"name": "Cale Giacometti"
}, {
"name": "Ave Jahnisch"
}, {
"name": "Aurelea Adshed"
}, {
"name": "Pavlov Borham"
}, {
"name": "Lamont McCanny"
}, {
"name": "Rustie Troyes"
}, {
"name": "Ivory Vina"
}]
# query="""
# INSERT INTO seller(name) VALUES(%s);
# """
# for i in seller_data:
# cur.execute(query,(i["name"],))
# query="SELECT * FROM seller"
# cur.execute(query)
tag_data=[
{
"title": "Cheese"
},
{
"title": "Chocolate"
},
{
"title": "Vanillia"
},
{
"title": "Vegetable"
},
{
"title": "Vegan"
},
{
"title": "Healthy"
},
{
"title": "Fit"
},
{
"title": "Meal"
},
{
"title": "Fast Food"
}
]
# query="""
# INSERT INTO tag(title) VALUES(%s);
# """
# for i in tag_data:
# cur.execute(query,(i['title'],))
# query='SELECT * FROM tag'
seller_ids=[]
for i in range(len(seller_data)):
seller_ids.append(i+1)
product_data=[
{
"title": "M&M Food Market",
"price": "17.0616609356653"
},
{
"title": "Soprole",
"price": "11.6234613464323"
},
{
"title": "Kinder",
"price": "2.62073436454904"
},
{
"title": "Andy Capp's fries",
"price": "14.6864611770429"
},
{
"title": "Bewley's",
"price": "7.01804420073426"
},
{
"title": "Vitta Foods",
"price": "4.5093621385793"
},
{
"title": "Taco Bell",
"price": "19.1318949810843"
},
{
"title": "Sun-Pat",
"price": "9.6603184191791"
},
{
"title": "Baskin robbins",
"price": "16.105171543595"
},
{
"title": "Wendy's",
"price": "5.43620887838128"
},
{
"title": "Cobblestone",
"price": "7.22419333514953"
},
{
"title": "Wonder Bread",
"price": "14.6278888390529"
},
{
"title": "Lavazza",
"price": "10.305469252777"
},
{
"title": "Kinder",
"price": "19.4697343713929"
},
{
"title": "Soprole",
"price": "16.3448767300439"
},
{
"title": "Nabisco",
"price": "2.48867588838966"
},
{
"title": "Tic Tac",
"price": "2.60812248457601"
},
{
"title": "Magnum",
"price": "19.4421954995218"
},
{
"title": "Papadopoulos",
"price": "19.4472127819654"
},
{
"title": "Wonder Bread",
"price": "12.7520409541913"
},
{
"title": "Papadopoulos",
"price": "1.811215852765"
},
{
"title": "Olymel",
"price": "7.34511601847835"
},
{
"title": "Domino",
"price": "7.64364533249459"
},
{
"title": "Pizza Hut",
"price": "12.6648227300797"
},
{
"title": "Red Lobster",
"price": "10.0007594130005"
},
{
"title": "Andy Capp's fries",
"price": "18.5981898673802"
},
{
"title": "Secret Recipe",
"price": "18.6991437984161"
},
{
"title": "Sun-Pat",
"price": "3.15631274094633"
},
{
"title": "Magnum",
"price": "10.3542353042188"
},
{
"title": "Heinz",
"price": "17.7369680049536"
},
{
"title": "Olymel",
"price": "19.9154627821015"
},
{
"title": "Taco Bell",
"price": "10.9514749045258"
},
{
"title": "Dunkin' Donuts",
"price": "11.479457990024"
},
{
"title": "Applebee's",
"price": "15.7718961763996"
},
{
"title": "Knorr",
"price": "10.4961827092321"
},
{
"title": "KFC",
"price": "12.4794360452702"
},
{
"title": "Domino",
"price": "17.0641279993877"
},
{
"title": "Knorr",
"price": "2.66790023197788"
},
{
"title": "Kits",
"price": "18.8862874209351"
},
{
"title": "Dunkin' Donuts",
"price": "7.84475450163929"
},
{
"title": "Applebee's",
"price": "13.4456292886499"
},
{
"title": "Nutella",
"price": "4.63776473637566"
},
{
"title": "Bewley's",
"price": "13.0057596485157"
},
{
"title": "Kits",
"price": "1.38640394266062"
},
{
"title": "Nesquik",
"price": "6.1496629436266"
},
{
"title": "KFC",
"price": "15.6723103028128"
},
{
"title": "Andy Capp's fries",
"price": "17.8805946269448"
},
{
"title": "Tic Tac",
"price": "7.01679017348997"
},
{
"title": "Andy Capp's fries",
"price": "7.87038087466284"
},
{
"title": "Bel Group",
"price": "10.6127773935966"
}
]
# query="""
# INSERT INTO product(title,price,seller_id) VALUES(%s,%s,%s);
# """
# for i in product_data:
# cur.execute(query,(i['title'],i['price'],random.choice(seller_ids)))
# query="SELECT * FROM product"
customers_ids=[]
for i in range(len(customer_data)):
customers_ids.append(i+1)
# query="""
# INSERT INTO wishlist(customer_id) VALUES(%s);
# """
# for i in customer_data:
# cur.execute(query,(random.choice(customers_ids),))
# query="SELECT * FROM wishlist"
# rate NUMERIC,
# # customer_id INT,
# # product_id INT,
# query="""
# INSERT INTO review(rate,customer_id,product_id) VALUES(%s,%s,%s);
# """
# for i in customer_data:
# cur.execute(query,(random.randint(1,5),random.choice(customers_ids),random.randint(1,len(product_data))))
# query='SELECT * FROM review'
# product_id INT,
# # customer_id INT,
# query="""
# INSERT INTO wishlist_products(product_id,customer_id) VALUES(%s,%s);
# """
# for i in customer_data:
# cur.execute(query,(random.randint(1,len(product_data)),random.choice(customers_ids)))
# query='SELECT * FROM wishlist_products'
# query="""
# INSERT INTO product_tags(product_id,tag_id) VALUES(%s,%s);
# """
# for i in product_data:
# cur.execute(query,(random.randint(1,len(product_data)),random.randint(1,len(tag_data))))
# query='SELECT * FROM product_tags'
# query="""
# SELECT *
# FROM product_tags pt
# LEFT JOIN tag t ON pt.tag_id = t.id
# WHERE pt.product_id = 5;
# """
# # query='SELECT * FROM product'
# query="""
# SELECT *
# FROM product
# LEFT JOIN seller ON product.seller_id = seller.id
# WHERE seller.id = 5;
# """
# query="""
# SELECT *
# FROM wishlist_products
# LEFT JOIN product ON wishlist_products.product_id = product.id
# WHERE wishlist_products.customer_id = 2;
# """
# query="""
# SELECT p.id, p.title
# FROM product p
# LEFT JOIN review r ON p.id = r.product_id
# GROUP BY p.id, p.title
# ORDER BY rate DESC
# LIMIT 10;
# """
# # query='''
# # SELECT * FROM review LEFT JOIN product ON product_id=product.id WHERE product_id=2 ;
# # '''
# # WHERE product_id IN (SELECT AVG(rate) FROM review GROUP BY product_id ORDER BY AVG(rate) DESC)
# # query="SELECT * FROM product"
# Starting from here
# Pick a tag and show that tag's products
query="""SELECT * FROM product
LEFT JOIN product_tags on product_tags.product_id=product.id WHERE product_tags.tag_id=5"""
# Pick a product and show that product's tags
# query="""SELECT * FROM product_tags
# LEFT JOIN product on product.id=product_tags.product_id WHERE product.id=5
# """
# Pick a seller and show that seller's products
# query="""
# SELECT * FROM product
# LEFT JOIN seller on seller.id=product.seller_id WHERE seller.id=5
# """
# Pick a customer and show the products in that customer's wishlist
# query="""
# SELECT * FROM wishlist_products
# LEFT JOIN customer on wishlist_products.customer_id=customer.id WHERE customer.id=45
# """
# Show the 10 products with the highest average review rating, together with that average
# query="""SELECT AVG(rate),product.id FROM product
# LEFT JOIN review on product.id=review.product_id GROUP BY product.id ORDER BY AVG(rate) LIMIT 10"""
# Order the tags by their product count, and show the count as well
# query="""
# SELECT COUNT(product_tags.product_id),product_tags.tag_id FROM product_tags LEFT JOIN tag on product_tags.tag_id=tag.id GROUP BY product_tags.tag_id ORDER BY COUNT(product_tags.product_id) DESC
# """
# Show the 10 customers whose wishlist products have the highest total price, together with that total
# query="""
# SELECT customer.id,SUM(wishlist_products.product_id) FROM customer LEFT JOIN wishlist_products on customer.id=wishlist_products.customer_id GROUP BY customer.id HAVING SUM(wishlist_products.product_id) IS NOT NULL ORDER BY SUM(wishlist_products.product_id) DESC LIMIT 10
# """
# Show the data of the first 10 sellers by id, along with the average review rating of each seller's products
query="""
SELECT customer.id, AVG(rate) FROM customer LEFT JOIN review on customer.id=review.customer_id GROUP BY customer.id HAVING AVG(rate) IS NOT NULL ORDER BY AVG(rate) DESC LIMIT 10
"""
show(cur)
con.commit()
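# Hedged cleanup sketch (not in the original script): release the cursor and
# connection once the queries are done.
# cur.close()
# con.close()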
|
MuradAsadzade/Postresql-join-tasks
|
ecommerce.py
|
ecommerce.py
|
py
| 13,617 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "psycopg2.connect",
"line_number": 3,
"usage_type": "call"
}
] |
8099610278
|
from datetime import datetime
import os
# from dataclasses import dataclass
from sensor.constant.trainingPipeline_consts import *
class TrainingPipelineConfig:
def __init__(self, timestamp=None):
# A datetime.now() default argument is evaluated once at import time; resolve per instance instead
timestamp = (timestamp or datetime.now()).strftime("%m_%d_%Y_%H_%M_%S")
self.pipeline_name: str = PIPELINE_NAME
self.artifact_dir: str = os.path.join(ARTIFACT_DIR, timestamp)
self.timestamp: str = timestamp
class DataIngestionConfig:
def __init__(self, training_pipeline_config:TrainingPipelineConfig):
self.data_ingestion_dir: str = os.path.join(
training_pipeline_config.artifact_dir, DATA_INGESTION_DIR_NAME
)
self.feature_store_file_path: str = os.path.join(
self.data_ingestion_dir, DATA_INGESTION_FEATURE_STORE_DIR, FILE_NAME
)
self.training_file_path: str = os.path.join(
self.data_ingestion_dir, DATA_INGESTION_INGESTED_DIR, TRAIN_FILE_NAME
)
self.testing_file_path: str = os.path.join(
self.data_ingestion_dir, DATA_INGESTION_INGESTED_DIR, TEST_FILE_NAME
)
self.train_test_split_ratio: float = DATA_INGESTION_TRAIN_TEST_SPLIT_RATION
self.collection_name: str = DATA_INGESTION_COLLECTION_NAME
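# Hedged usage sketch (not in the original file; the resulting paths depend on
# the constants in sensor.constant.trainingPipeline_consts):
# tpc = TrainingPipelineConfig()
# dic = DataIngestionConfig(training_pipeline_config=tpc)
# dic.training_file_path  # <ARTIFACT_DIR>/<timestamp>/<DATA_INGESTION_DIR_NAME>/<ingested dir>/<train file>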
|
sverma1999/sensor-fault-detection
|
sensor/entity/config_entity.py
|
config_entity.py
|
py
| 1,269 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
}
] |
31788132323
|
from typing import TYPE_CHECKING, Iterable, List, Optional, Union, overload
from ..builtins import types
from ..common.error import ConstraintError
from ..node import (
ArrayTypeNode,
FuncTypeNode,
PointerTypeNode,
SimpleTypeNode,
TypeNode,
)
if TYPE_CHECKING:
from .block import Expression
class ChunkVariable:
def __init__(
self,
name: str,
vtype: Optional[TypeNode],
chunk: Optional["Chunk"],
initial: Optional["Expression"] = None,
):
self.name = name
self.vtype = vtype
self.chunk = chunk
self.initial = initial
def _typenamestr(
self, tp: TypeNode, original: Optional[str], name: Optional[str]
) -> str:
original = original or ""
name = name or ""
if isinstance(tp, SimpleTypeNode):
return f"{types.TRANSLATIONS[tp.core]} {name}"
elif isinstance(tp, FuncTypeNode):
args = ", ".join(self._typenamestr(arg, None, None) for arg in tp.args)
if name == original:
assert original
base = f"{original}({args})"
return self._typenamestr(tp.ret, base, base)
else:
ret = self._typenamestr(tp.ret, None, None)
return f"{ret} ({name})({args})"
elif isinstance(tp, PointerTypeNode):
return self._typenamestr(tp.base, original, f"*{name}")
elif isinstance(tp, ArrayTypeNode):
return self._typenamestr(tp.base, original, f"{name}[{tp.size}]")
else:
raise RuntimeError("invalid variable type")
def typename(self) -> str:
if self.vtype is None:
return f"void {self.name}"
return self._typenamestr(self.vtype, self.name, self.name)
def typestr(self) -> str:
if self.vtype is None:
return "void"
return self._typenamestr(self.vtype, None, None).strip()
def _basic_types(self, tp: TypeNode) -> Iterable[str]:
if isinstance(tp, SimpleTypeNode):
yield tp.core
elif isinstance(tp, PointerTypeNode):
yield from self._basic_types(tp.base)
elif isinstance(tp, ArrayTypeNode):
yield from self._basic_types(tp.base)
elif isinstance(tp, FuncTypeNode):
yield from self._basic_types(tp.ret)
for arg in tp.args:
yield from self._basic_types(arg)
else:
raise RuntimeError("invalid variable type")
def basic_types(self) -> Iterable[str]:
if self.vtype is None:
return iter(())
return self._basic_types(self.vtype)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.typename()}>"
class ChunkConstraint:
def __init__(self, islocal=False, isglobal=False, static=False):
self.islocal = islocal
self.isglobal = isglobal
self.static = static
self._verify()
def copy(self) -> "ChunkConstraint":
return ChunkConstraint(
islocal=self.islocal, isglobal=self.isglobal, static=self.static
)
def merge(self, other: "ChunkConstraint"):
self.islocal = self.islocal or other.islocal
self.isglobal = self.isglobal or other.isglobal
self.static = self.static or other.static
self._verify()
def _verify(self):
if self.islocal and self.isglobal:
raise ConstraintError("cannot allow local and global constraints")
def __repr__(self) -> str:
return f"<{self.__class__.__name__} local={self.islocal} global={self.isglobal} static={self.static}>"
class Chunk:
def __init__(
self,
variables: List[ChunkVariable],
constraint: Optional[ChunkConstraint] = None,
):
self.variables = variables
self._table = {
var.name: i
for i, var in enumerate(variables)
if not var.name.startswith("_")
}
self.constraint = ChunkConstraint() if constraint is None else constraint
@property
def varnames(self) -> List[str]:
return [var.name for var in self.variables]
def add_variable(self, variable: ChunkVariable):
if variable.name in self._table:
raise KeyError("variable already exists in chunk")
self.variables.append(variable)
self._table[variable.name] = len(self.variables) - 1
def rename_variable(self, variable: ChunkVariable, name: str):
if variable not in self.variables:
raise KeyError("variable not in chunk")
idx = self._table[variable.name]
self._table.pop(variable.name)
variable.name = name
self._table[variable.name] = idx
def remove_variable(self, variable: ChunkVariable):
if variable.name not in self._table:
raise KeyError("variable not in chunk table")
idx = self._table[variable.name]
target = self.variables[idx]
if target is not variable:
raise KeyError("variable does not match")
self.variables.remove(target)
self._table.pop(target.name)
def lookup(self, name: str) -> Optional[ChunkVariable]:
i = self._table.get(name)
if i is None:
return None
else:
return self.variables[i]
def __contains__(self, var: Union[str, ChunkVariable]) -> bool:
if isinstance(var, str):
return var in self._table
else:
return var in self.variables
def __repr__(self) -> str:
names = ", ".join(var.name for var in self.variables)
return f"<{self.__class__.__name__} {names}>"
@overload
def merge_chunks(first: Optional[Chunk], second: Chunk) -> Chunk:
...
@overload
def merge_chunks(first: Chunk, second: Optional[Chunk]) -> Chunk:
...
def merge_chunks(first: Optional[Chunk], second: Optional[Chunk]) -> Chunk:
if first is None:
assert second is not None
return second
if second is None:
assert first is not None
return first
constraint = first.constraint.copy()
constraint.merge(second.constraint)
return Chunk([*first.variables, *second.variables], constraint)
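# Hedged usage sketch (not in the original file; the SimpleTypeNode constructor
# signature is an assumption about ..node):
# a = Chunk([ChunkVariable("x", SimpleTypeNode("int"), None)])
# b = Chunk([ChunkVariable("y", SimpleTypeNode("int"), None)])
# merge_chunks(a, b)  # -> <Chunk x, y>, with constraints merged via ChunkConstraint.merge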
|
jedevc/fyp
|
vulnspec/graph/chunk.py
|
chunk.py
|
py
| 6,242 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "node.TypeNode",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "node.TypeNode",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "node.SimpleTypeNode",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "builtins.types.TRANSLATIONS",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "builtins.types",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "node.FuncTypeNode",
"line_number": 38,
"usage_type": "argument"
},
{
"api_name": "node.PointerTypeNode",
"line_number": 47,
"usage_type": "argument"
},
{
"api_name": "node.ArrayTypeNode",
"line_number": 49,
"usage_type": "argument"
},
{
"api_name": "node.TypeNode",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "node.SimpleTypeNode",
"line_number": 67,
"usage_type": "argument"
},
{
"api_name": "node.PointerTypeNode",
"line_number": 69,
"usage_type": "argument"
},
{
"api_name": "node.ArrayTypeNode",
"line_number": 71,
"usage_type": "argument"
},
{
"api_name": "node.FuncTypeNode",
"line_number": 73,
"usage_type": "argument"
},
{
"api_name": "typing.Iterable",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "common.error.ConstraintError",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "typing.overload",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "typing.overload",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 193,
"usage_type": "name"
}
] |
16563057746
|
import numpy as np
import time, cv2, copy, os, random, sys
# Check if Running On Pi
import io
import os
def is_raspberrypi():
try:
with io.open('/sys/firmware/devicetree/base/model', 'r') as m:
if 'raspberry pi' in m.read().lower(): return True
except Exception: pass
return False
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
class image_processor:
def __init__(self, pixelValues, displayDim, image_folder): #displayName = 'generic display'
# Final Image Dimensions and Colors
self.dispWidth = displayDim[0]
self.dispHeight = displayDim[1]
self.pixelColors = pixelValues
self.image_folder = image_folder
#print('processor extablished for ' + displayName + ' dimension: ' + str(self.displayWidth) + 'x' + str(self.displayHeight) + ' pixel values: ' + pixelValues)
def newImage(self, image_title):
self.imgTitle = str(sys.path[0]) + os.sep + 'DispPics' + str(image_title)
print("imported Image Title = " + self.imgTitle + " ----- of type " + str(type(self.imgTitle)))
def getImageTitle(self):
return self.imgTitle
def __displayRGB(self):
r = self.__imageResizeRGB()
plt.imshow(r)
plt.show()
# split self off
def __imageResizeRGB(self):
img = cv2.imread(self.imgTitle)
resized = cv2.resize(img, (self.dispWidth, self.dispHeight), interpolation = cv2.INTER_AREA)
return resized
def __displayBW(self):
r = self.__imageResizeBW()
plt.imshow(r, cmap = "gray")
plt.show()
# split self off
def __imageResizeBW(self):
img = cv2.imread(self.imgTitle)
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
resized = cv2.resize(imgGray, (self.dispWidth, self.dispHeight), interpolation = cv2.INTER_AREA)
return resized
def __reduceColors(self, img, K):
n = img[0][0].size
Z = img.reshape((-1,n))
# convert to np.float32
Z = np.float32(Z)
# define criteria, number of clusters(K) and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
# Now convert back into uint8, and make original image
center = np.uint8(center)
res = center[label.flatten()]
res2 = res.reshape((img.shape))
return res2
def __removeColors(self, img):
recorded = np.unique(img)
imgCopy = copy.deepcopy(img)
for y in range(0,len(img)):
for x in range(0,len(img[0])):
for n in range(0,len(recorded)):
if imgCopy[y][x] == recorded[n]:
imgCopy[y][x] = n
return imgCopy
def defaultConverter(self, imgTit = False, k = 4):
if imgTit is False:
self.getRandomImage()
else:
self.newImage(imgTit)
bw = self.__imageResizeBW()
lowRes = self.__reduceColors(bw, k)
remapped = self.__removeColors(lowRes)
return remapped
# Note: getRandomImage has been unreliable; cleaned up below.
def getRandomImage(self):
# Compensate if running on a real Raspberry Pi
# Reservoir sampling: keep the n-th file seen with probability 1/n,
# so every file under the image folder is equally likely to be picked.
n = 0
rfile = None
random.seed()
image_dir = str(sys.path[0]) + self.image_folder
print(image_dir)
for root, dirs, files in os.walk(image_dir):
for name in files:
n += 1
if random.uniform(0, n) < 1:
rfile = os.path.join(root, name)
print(rfile)
self.imgTitle = rfile
if __name__ == '__main__':
dispDim = (16, 16)
directory = "/DispPics"
ip = image_processor(('#CD853F','#8B5A2B','#008080','#D8BFD8'), dispDim, directory)
print(ip.defaultConverter(k = 3))
i = 1
while True:
time.sleep(1)
i += 1
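# Hedged preview sketch (not in the original file): map the reduced index image
# back to RGB for display; the palette triples are assumptions decoded from the
# hex colors passed to image_processor above.
# palette = np.array([[205, 133, 63], [139, 90, 43], [0, 128, 128]], dtype=np.uint8)
# idx = ip.defaultConverter(k=3)
# plt.imshow(palette[idx]); plt.show()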
|
Rolling-Blocks/RB-CODE-Prototype-1
|
image_processor.py
|
image_processor.py
|
py
| 4,098 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "io.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "cv2.imread",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "cv2.imread",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "cv2.TERM_CRITERIA_EPS",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "cv2.TERM_CRITERIA_MAX_ITER",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "cv2.kmeans",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "cv2.KMEANS_RANDOM_CENTERS",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint8",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "random.uniform",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 126,
"usage_type": "call"
}
] |
28313903181
|
from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QMenuBar, QAction, QTextEdit, QHBoxLayout, QWidget, QFontDialog, QColorDialog, QFileDialog, QDialog, QVBoxLayout, QMessageBox
from PyQt5 import QtGui, QtCore
from PyQt5.QtGui import QIcon
from PyQt5.QtPrintSupport import QPrinter, QPrintDialog, QPrintPreviewDialog
from PyQt5.QtCore import QFileInfo
import sys
class Window(QMainWindow):  # window class
    def __init__(self):
        super().__init__()
        self.title = ('Einfacher Text Editor mit PDF Funktion')  # window title
        self.top = 400    #
        self.left = 600   # window position and size
        self.width = 400  #
        self.height = 300 #
        self.iconName = 'win.png'  # icon
        self.setWindowIcon(QIcon(self.iconName))
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.createEditor()  # show the editor
        self.CreateMenu()    # show the menu bar
        self.show()
    #--------------------------------- M e n u   B a r -------------------------------#
def CreateMenu(self):
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu("Datei")
editMenu = mainMenu.addMenu("Bearbeiten")
infoMenu = mainMenu.addMenu("Info")
        helpAction = QAction(QtGui.QIcon(""), 'Help', self)
        helpAction.setShortcut("")
        helpAction.triggered.connect(self.helpAction)
        infoMenu.addAction(helpAction)  # Help
        openAction = QAction(QIcon("open.png"), 'Öffnen', self)
        openAction.setShortcut("")
        openAction.triggered.connect(self.openAction)
        fileMenu.addAction(openAction)  # Open
        saveAction = QAction(QIcon("save.png"), 'Speichern unter', self)
        saveAction.setShortcut("")
        saveAction.triggered.connect(self.saveAction)
        fileMenu.addAction(saveAction)  # Save
        printAction = QAction(QIcon("print.png"), 'Drucken', self)
        printAction.setShortcut("")
        printAction.triggered.connect(self.printDialog)
        fileMenu.addAction(printAction)  # Print
        printpreviewAction = QAction(QIcon("preprint.png"), 'Druckvorschau', self)
        printpreviewAction.triggered.connect(self.printPreviewDialog)
        fileMenu.addAction(printpreviewAction)  # Print preview
        pdfAction = QAction(QIcon("pdf.png"), 'PDF Exportieren', self)
        pdfAction.triggered.connect(self.pdfExport)
        fileMenu.addAction(pdfAction)  # PDF export
        exitAction = QAction(QIcon("exit.png"), 'Beenden', self)
        exitAction.setShortcut("")
        exitAction.triggered.connect(self.exitWindow)
        fileMenu.addAction(exitAction)  # Quit
        editAction = QAction(QIcon("edit.png"), 'Schrift', self)
        editAction.setShortcut("")
        editAction.triggered.connect(self.fontDialog)
        editMenu.addAction(editAction)  # Font
        colorAction = QAction(QIcon("color.png"), 'Schrift Farbe', self)  # font color
        colorAction.triggered.connect(self.colorDialog)
        editMenu.addAction(colorAction)
    #------------------------ Exit button function ----------------------------------#
def exitWindow(self):
self.close()
#-------------------------Text Editor---------------------------------------------#
def createEditor(self):
self.textEdit = QTextEdit(self)
self.setCentralWidget(self.textEdit)
    #------------------------Font dialog------------------------------------------#
def fontDialog(self):
font, ok = QFontDialog.getFont()
if ok:
self.textEdit.setFont(font)
    #----------------------- Font color dialog ----------------------------------#
def colorDialog(self):
color = QColorDialog.getColor()
self.textEdit.setTextColor(color)
    #----------------------------Print the file---------------------------------#
def printDialog(self):
printer = QPrinter(QPrinter.HighResolution)
dialog = QPrintDialog(printer, self)
if dialog.exec_() == QPrintDialog.Accepted:
self.textEdit.print_(printer)
    #--------------------------Print preview---------------------------------------#
def printPreviewDialog(self):
printer = QPrinter(QPrinter.HighResolution)
previewDialog = QPrintPreviewDialog(printer, self)
previewDialog.paintRequested.connect(self.printPreview)
previewDialog.exec_()
def printPreview(self, printer):
self.textEdit.print_(printer)
#-------------------------PDF Exporter-----------------------------------------#
def pdfExport(self):
        fn, _ = QFileDialog.getSaveFileName(self, "Export PDF", None, "PDF files (*.pdf);;All Files (*)")
        if fn != '':
            if QFileInfo(fn).suffix() == "": fn += '.pdf'
            printer = QPrinter(QPrinter.HighResolution)
            printer.setOutputFormat(QPrinter.PdfFormat)
            printer.setOutputFileName(fn)
            self.textEdit.document().print_(printer)
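        # Note: the export simply "prints" the document into a QPrinter whose
        # output format is PdfFormat, so no separate PDF library is required.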
    #-------------------------------Load file------------------------------------#
def openAction(self):
fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')
if fname[0]:
f = open(fname[0], 'r')
with f:
data = f.read()
self.textEdit.setText(data)
    #------------------------------Save file---------------------------------#
def saveAction(self):
filename, _ = QFileDialog.getSaveFileName(self, 'Datei Speichern', ".txt", "Alle Datein (*);; Text Datei (*.txt)")
if filename:
with open(filename, "w") as file:
file.write(self.textEdit.toPlainText())
file.close()
#-----------------------------Message Box-------------------------------------#
def helpAction(self):
QMessageBox.about(self, "Entwickelt mit QT5", "Alpha 1.0")
    #------------------------------End-------------------------------------------#
app = QApplication(sys.argv)
window = Window()
sys.exit(app.exec_())
|
schnuppi1984/Easy-Text-Editor
|
start.py
|
start.py
|
py
| 6,702 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QTextEdit",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFontDialog.getFont",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFontDialog",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QColorDialog.getColor",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QColorDialog",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtPrintSupport.QPrinter",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtPrintSupport.QPrinter.HighResolution",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtPrintSupport.QPrintDialog",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtPrintSupport.QPrintDialog.Accepted",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtPrintSupport.QPrintDialog",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtPrintSupport.QPrinter",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtPrintSupport.QPrinter.HighResolution",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtPrintSupport.QPrintPreviewDialog",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog.getSaveFileName",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QFileInfo",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtPrintSupport.QPrinter",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtPrintSupport.QPrinter.HighResolution",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtPrintSupport.QPrinter.PdfFormat",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtPrintSupport.QPrinter",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog.getSaveFileName",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox.about",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 169,
"usage_type": "call"
}
] |
2987884048
|
from urllib2 import urlopen, HTTPError
from django.template.defaultfilters import slugify
from django.core.files.base import ContentFile
from django.db import transaction, IntegrityError
from item.models import Item, Link
from movie.models import Movie, Actor, Director, Genre
from decorators.retry import retry
class LoadMovie():
"""
This manager inserts a movie into the database along with its
corresponding genres, actors, and directors.
"""
exists = False
def __init__(self, title, imdb_id, runtime,
synopsis, theater_date, keywords):
"""
Inserts the movie into the database if it doesn't already
exist in the database.
"""
try:
self.movie, self.created = Movie.objects.get_or_create(
title=title,
imdb_id=imdb_id,
runtime=runtime,
synopsis=synopsis,
theater_date=theater_date,
keywords = keywords,
url=slugify(title)
)
except IntegrityError:
print('TRANSACTION FAILED ON MOVIE INSERT: Rolling back now...')
transaction.rollback()
def insert_genres(self, genres):
"""
Inserts the genres for the movie.
"""
genre_list = []
try:
for g in genres:
genre, created = Genre.objects.get_or_create(
name=g, url=slugify(g))
genre_list.append(genre)
self.movie.genre.add(*genre_list)
except IntegrityError:
print('TRANSACTION FAILED ON GENRE INSERT: Rolling back now...')
transaction.rollback()
def insert_actors(self, actors):
"""
Inserts the actors for the movie.
"""
actor_list = []
try:
for a in actors:
actor, created = Actor.objects.get_or_create(
name=a, url=slugify(a))
actor_list.append(actor)
self.movie.actors.add(*actor_list)
except IntegrityError:
print('TRANSACTION FAILED ON ACTOR INSERT: Rolling back now...')
transaction.rollback()
def insert_directors(self, directors):
"""
Inserts the directors for the movie.
"""
director_list = []
try:
for d in directors:
director, created = Director.objects.get_or_create(
name=d, url=slugify(d))
director_list.append(director)
self.movie.directors.add(*director_list)
except IntegrityError:
print('TRANSACTION FAILED ON DIRECTOR INSERT: Rolling back now...')
transaction.rollback()
@retry(HTTPError)
def insert_image(self, url):
"""
Inserts the image for the movie.
"""
try:
if 'default.jpg' in self.movie.image.url or self.created:
image = urlopen(url, timeout=15)
self.movie.image.save(
self.movie.url+u'.jpg',
ContentFile(image.read())
)
except IntegrityError:
print('TRANSACTION FAILED ON IMAGE INSERT: Rolling back now...')
transaction.rollback()
def insert_trailer(self, url):
"""
Inserts the trailer as a link.
"""
try:
Link.objects.get_or_create(
item=self.movie.item,
partner="YouTube",
url=url
)
except IntegrityError:
print('TRANSACTION FAILED ON TRAILER INSERT: Rolling back now...')
transaction.rollback()
|
sameenjalal/mavenize-beta
|
mavenize/lib/db/loadmovie.py
|
loadmovie.py
|
py
| 3,712 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "movie.models.Movie.objects.get_or_create",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "movie.models.Movie.objects",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "movie.models.Movie",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.template.defaultfilters.slugify",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.db.IntegrityError",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.db.transaction.rollback",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.db.transaction",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "movie.models.Genre.objects.get_or_create",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "movie.models.Genre.objects",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "movie.models.Genre",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "django.template.defaultfilters.slugify",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.db.IntegrityError",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.db.transaction.rollback",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.db.transaction",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "movie.models.Actor.objects.get_or_create",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "movie.models.Actor.objects",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "movie.models.Actor",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "django.template.defaultfilters.slugify",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.db.IntegrityError",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "django.db.transaction.rollback",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.db.transaction",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "movie.models.Director.objects.get_or_create",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "movie.models.Director.objects",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "movie.models.Director",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "django.template.defaultfilters.slugify",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "django.db.IntegrityError",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "django.db.transaction.rollback",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "django.db.transaction",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "urllib2.urlopen",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "django.core.files.base.ContentFile",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "django.db.IntegrityError",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "django.db.transaction.rollback",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "django.db.transaction",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "decorators.retry.retry",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "urllib2.HTTPError",
"line_number": 83,
"usage_type": "argument"
},
{
"api_name": "item.models.Link.objects.get_or_create",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "item.models.Link.objects",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "item.models.Link",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "django.db.IntegrityError",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "django.db.transaction.rollback",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "django.db.transaction",
"line_number": 111,
"usage_type": "name"
}
] |
31236652781
|
from youtube3.youtube import *
import json
from oauth2client.tools import argparser
import re
def process_videos(workDir='.', inputFile='liked.json', recommendedFile='recommended.json',
excludedFile='excluded.json', postponedFile='postponed.json',maxCount=5):
recommended, excluded, postponed, liked = {}, {}, {}, {}
workDir, inputFile, recommendedFile, excludedFile, postponedFile = workDir or '.', inputFile or 'liked.json', \
recommendedFile or 'recommended.json', excludedFile or 'excluded.json', postponedFile or 'postponed.json'
liked = load_definition(liked, inputFile, workDir)
recommended = load_definition(recommended, recommendedFile, workDir)
excluded = load_definition(excluded, excludedFile, workDir)
    postponed = load_definition(postponed, postponedFile, workDir)
    # start/end and the OAuth flags are read from the module-level args
    start = int(args.start) if args.start else 0
    end = min(int(args.end), len(liked)) if args.end else len(liked)
    youtube = Youtube(get_authenticated_service(args))
likedList = list(liked.items())[start:end]
for videoId, title in likedList:
print("Now processing %s, %s" % (videoId, title))
for relatedvideos in youtube.iterate_related_videos(videoId, maxCount):
for item in relatedvideos['items']:
rvideoId, rtitle = item['id']['videoId'], item['snippet']['title']
if rvideoId not in liked and rvideoId not in excluded and rvideoId not in postponed:
if rvideoId not in recommended:
recommended[rvideoId] = {"title": rtitle, "count": 1}
else:
recommended[rvideoId]["count"] += 1
recommendedSorted = sorted(recommended.items(), key=lambda x: x[1]["count"], reverse=True)
return recommendedSorted
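# Note: candidates are ranked purely by how many liked videos list them as
# "related"; ties keep their original order because sorted() is stable.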
def load_definition(records, inputFile, workDir):
inputFileC = workDir + '/' + inputFile
if os.path.isfile(inputFileC):
with open(inputFileC, 'r', encoding="utf-8") as f:
records = dict(json.load(f))
else:
print("Cannot find file {}".format(inputFileC))
return records
def tokenize_lists( recommended, liked, workDir , ignore_words_file):
def get_tokenized(str,ignored_words):
str = str.lower()
str = re.sub(r"\(.*\)", "" , str)
str = re.sub(r"[0-9]+", "", str)
strtok = re.split(r'[\[\s\-\(\)\"\\\/\|\!\&\,\.\+]',str)
strl = [s for s in strtok if s not in ignored_words and len(s) > 0]
return strl
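    # Example (assumed input): get_tokenized('Daft Punk - Get Lucky (Official Video)', [])
    # lowercases the title, strips the parenthesised part and digits, splits on
    # the separators, and returns ['daft', 'punk', 'get', 'lucky'].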
ignored_words = []
if os.path.isfile(workDir + '/' + ignore_words_file):
with open(workDir + '/' + ignore_words_file, 'r', encoding="utf-8") as f:
ignored_words = f.read().splitlines()
ignored_words = [ i.lower() for i in ignored_words]
tok_liked = {k:get_tokenized(v,ignored_words) for k,v in liked.items()}
tok_liked_list = [get_tokenized(v, ignored_words) for k, v in liked.items()]
#print(tok_liked_list)
tok_recommended = {k: {"title": get_tokenized(v["title"],ignored_words), "count": v["count"]} for k, v in recommended.items()}
tok_duplicates = {k: {"title": v["title"], "count": v["count"]} for k, v in
tok_recommended.items() if v["title"] in tok_liked_list}
tok_no_duplicates = {k: {"title": v["title"], "count": v["count"]} for k, v in
tok_recommended.items() if v["title"] not in tok_liked_list}
return tok_duplicates, tok_no_duplicates
def save_recommended(workDir='.', recommendedFile='recommended.json', recommendedSorted={} ):
workDir, recommendedFile, recommendedSorted = workDir or '.', \
recommendedFile or 'recommended.json', recommendedSorted or {}
save_to_json(recommendedFile, recommendedSorted, workDir)
def save_to_json(outputFile, outputData, workDir):
with open(workDir + '/' + outputFile, 'w', encoding="utf-8") as f:
json.dump(outputData, f, ensure_ascii=False)
print("Saved file: {}".format(workDir + '/' + outputFile))
def retrieve_recommended(args):
recommendedSorted = process_videos(workDir=args.workDir, inputFile=args.inputFile,
recommendedFile=args.recommendedFile,
excludedFile=args.excludedFile, postponedFile=args.postponedFile,
maxCount=args.maxCount)
save_recommended(workDir=args.workDir, recommendedFile=args.recommendedFile, recommendedSorted=recommendedSorted)
return recommendedSorted
def eliminate_duplicates(args):
liked, recommended = {}, {}
liked = load_definition(liked, args.inputFile, args.workDir)
recommended = load_definition(recommended, args.recommendedFile or 'recommended.json', args.workDir)
duplicates, no_duplicates = tokenize_lists(recommended=recommended, liked=liked, workDir=args.workDir,
ignore_words_file='ignore_words.txt')
save_to_json(outputData=list([[k, v] for k, v in duplicates.items()]), outputFile='duplicates.json',
workDir=args.workDir)
save_to_json(outputData=list([[k, v] for k, v in no_duplicates.items()]), outputFile='recommended_no_dup.json',
workDir=args.workDir)
if __name__ == "__main__":
argparser.add_argument('--workDir')
argparser.add_argument('--maxCount')
argparser.add_argument('--inputFile')
argparser.add_argument('--start')
argparser.add_argument('--end')
argparser.add_argument('--recommendedFile')
argparser.add_argument('--excludedFile')
argparser.add_argument('--postponedFile')
args = argparser.parse_args()
if (args.workDir is None):
print("Usage : python recommend_videos.py --workdDir <workDir> --maxCount <maxCount> --inputFile <file>")
sys.exit(0)
if not os.path.isdir(args.workDir):
print("{} does not exist -- exiting".format(args.workDir))
sys.exit(0)
retrieve_recommended(args)
eliminate_duplicates(args)
|
diegoami/DA-youtube-scripts
|
youtube-scripts/recommend_videos.py
|
recommend_videos.py
|
py
| 6,021 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.load",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "oauth2client.tools.argparser.add_argument",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "oauth2client.tools.argparser",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "oauth2client.tools.argparser.add_argument",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "oauth2client.tools.argparser",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "oauth2client.tools.argparser.add_argument",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "oauth2client.tools.argparser",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "oauth2client.tools.argparser.add_argument",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "oauth2client.tools.argparser",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "oauth2client.tools.argparser.add_argument",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "oauth2client.tools.argparser",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "oauth2client.tools.argparser.add_argument",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "oauth2client.tools.argparser",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "oauth2client.tools.argparser.add_argument",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "oauth2client.tools.argparser",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "oauth2client.tools.argparser.add_argument",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "oauth2client.tools.argparser",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "oauth2client.tools.argparser.parse_args",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "oauth2client.tools.argparser",
"line_number": 123,
"usage_type": "name"
}
] |
70411254267
|
import logging
import re
import subprocess
import operator
import time
import os
import shutil
import glob
import sys
import resource
import signal
from odoo import models, fields, api
from odoo.tools import config, appdirs, DEFAULT_SERVER_DATETIME_FORMAT
from .tools import dashes, dt2time, uniq_list, mkdirs, local_pgadmin_cursor, run, grep, lock, locked, rfind, fqdn
_logger = logging.getLogger(__name__)
_re_error = r'^(?:\d{4}-\d\d-\d\d \d\d:\d\d:\d\d,\d{3} \d+ (?:ERROR|CRITICAL) )|(?:Traceback \(most recent call last\):)$'
_re_warning = r'^\d{4}-\d\d-\d\d \d\d:\d\d:\d\d,\d{3} \d+ WARNING '
_re_job = re.compile(r'job_\d')
class RunbotBuild(models.Model):
_name = "runbot.build"
_order = 'id desc'
branch_id = fields.Many2one('runbot.branch', 'Branch', required=True, ondelete='cascade')
repo_id = fields.Many2one('runbot.repo', string='Repository', related='branch_id.repo_id')
name = fields.Char('Revno', required=True)
host = fields.Char('Host')
port = fields.Integer('Port')
dest = fields.Char(compute='_compute_dest_domain', string='Dest', readonly=1, store=True)
domain = fields.Char(compute='_compute_dest_domain', string='URL')
date = fields.Datetime('Commit date')
author = fields.Char('Author')
author_email = fields.Char('Author Email')
committer = fields.Char('Committer')
committer_email = fields.Char('Committer Email')
subject = fields.Text('Subject')
sequence = fields.Integer('Sequence')
modules = fields.Char("Modules to Install")
result = fields.Char('Result') # ok, ko, warn, skipped, killed
pid = fields.Integer('Pid')
state = fields.Char('Status', default='pending') # pending, testing, running, done, duplicate
job = fields.Char('Job') # job_*
job_start = fields.Datetime('Job start')
job_end = fields.Datetime('Job end')
job_time = fields.Integer(compute='_compute_time_age', string='Job time')
job_age = fields.Integer(compute='_compute_time_age', string='Job age')
duplicate_id = fields.Many2one('runbot.build', 'Corresponding Build')
server_match = fields.Selection([('builtin', 'This branch includes Odoo server'),
('exact', 'branch/PR exact name'),
('prefix', 'branch whose name is a prefix of current one'),
('fuzzy', 'Fuzzy - common ancestor found'),
('default', 'No match found - defaults to master')],
string='Server branch matching')
@api.depends('branch_id')
def _compute_dest_domain(self):
domain = self.env['runbot.repo'].domain()
for build in self:
nickname = dashes(build.branch_id.name.split('/')[2])[:32]
build.dest = "%05d-%s-%s" % (build.id, nickname, build.name[:6])
if build.repo_id.nginx:
build.domain = "%s.%s" % (build.dest, build.host)
else:
build.domain = "%s:%s" % (domain, build.port)
@api.depends('job_end', 'job_start')
def _compute_time_age(self):
for r in self:
if r.job_end:
r.job_time = int(dt2time(r.job_end) - dt2time(r.job_start))
elif r.job_start:
r.job_time = int(time.time() - dt2time(r.job_start))
if r.job_start:
r.job_age = int(time.time() - dt2time(r.job_start))
    @api.model
    def create(self, value):
        build = super().create(value)
extra_info = {'sequence': build.id}
# detect duplicate
domain = [
('repo_id', '=', build.repo_id.duplicate_id.id),
('name', '=', build.name),
('duplicate_id', '=', False),
'|', ('result', '=', False), ('result', '!=', 'skipped')
]
duplicate_build = self.search(domain)
if duplicate_build:
extra_info.update({'state': 'duplicate', 'duplicate_id': duplicate_build.ids[0]})
duplicate_build.write({'duplicate_id': build.id})
        build.write(extra_info)
        return build
def reset(self):
self.write({'state': 'pending'})
def logger(self, *l):
l = list(l)
for build in self:
l[0] = "%s %s" % (build.dest, l[0])
_logger.debug(*l)
def list_jobs(self):
return sorted(job for job in dir(self) if _re_job.match(job))
@api.model
def find_port(self):
# currently used port
ports = set(i['port'] for i in self.search_read([('state', 'not in', ['pending', 'done'])], ['port']))
# starting port
icp = self.env['ir.config_parameter']
port = int(icp.get_param('runbot.starting_port', default=2000))
# find next free port
while port in ports:
port += 2
return port
def _get_closest_branch_name(self, target_repo_id):
"""Return (repo, branch name) of the closest common branch between build's branch and
any branch of target_repo or its duplicated repos.
Rules priority for choosing the branch from the other repo is:
1. Same branch name
2. A PR whose head name match
3. Match a branch which is the dashed-prefix of current branch name
4. Common ancestors (git merge-base)
Note that PR numbers are replaced by the branch name of the PR target
        to prevent the above rules from mistakenly linking PRs of different repos together.
"""
self.ensure_one()
branch_model = self.env['runbot.branch']
branch, repo = self.branch_id, self.repo_id
pi = branch._get_pull_info()
name = pi['base']['ref'] if pi else branch.branch_name
target_repo = self.env['runbot.repo'].browse(target_repo_id)
target_repo_ids = [target_repo.id]
r = target_repo.duplicate_id
while r:
if r.id in target_repo_ids:
break
target_repo_ids.append(r.id)
r = r.duplicate_id
_logger.debug('Search closest of %s (%s) in repos %r', name, repo.name, target_repo_ids)
sort_by_repo = lambda d: (not d['sticky'], # sticky first
target_repo_ids.index(d['repo_id'][0]),
-1 * len(d.get('branch_name', '')),
-1 * d['id'])
result_for = lambda d, match='exact': (d['repo_id'][0], d['name'], match)
branch_exists = lambda d: branch_model.browse(d['id'])._is_on_remote()
fields = ['name', 'repo_id', 'sticky']
# 1. same name, not a PR
domain = [
('repo_id', 'in', target_repo_ids),
('branch_name', '=', name),
('name', '=like', 'refs/heads/%'),
]
targets = branch_model.search_read(domain, fields, order='id DESC')
targets = sorted(targets, key=sort_by_repo)
if targets and branch_exists(targets[0]):
return result_for(targets[0])
# 2. PR with head name equals
domain = [
('repo_id', 'in', target_repo_ids),
('pull_head_name', '=', name),
('name', '=like', 'refs/pull/%'),
]
pulls = branch_model.search_read(domain, fields, order='id DESC')
pulls = sorted(pulls, key=sort_by_repo)
for pull in pulls:
pi = branch_model.browse(pull['id'])._get_pull_info()
if pi.get('state') == 'open':
return result_for(pull)
# 3. Match a branch which is the dashed-prefix of current branch name
branches = branch_model.search_read(
[('repo_id', 'in', target_repo_ids), ('name', '=like', 'refs/heads/%')],
fields + ['branch_name'], order='id DESC'
)
branches = sorted(branches, key=sort_by_repo)
for branch in branches:
if name.startswith(branch['branch_name'] + '-') and branch_exists(branch):
return result_for(branch, 'prefix')
# 4. Common ancestors (git merge-base)
for target_id in target_repo_ids:
common_refs = {}
cr = self.env.cr
cr.execute("""
SELECT b.name
FROM runbot_branch b,
runbot_branch t
WHERE b.repo_id = %s
AND t.repo_id = %s
AND b.name = t.name
AND b.name LIKE 'refs/heads/%%'
""", [repo.id, target_id])
for common_name, in cr.fetchall():
try:
commit = repo.git(['merge-base', branch['name'], common_name]).strip()
cmd = ['log', '-1', '--format=%cd', '--date=iso', commit]
common_refs[common_name] = repo.git(cmd).strip()
except subprocess.CalledProcessError:
# If merge-base doesn't find any common ancestor, the command exits with a
# non-zero return code, resulting in subprocess.check_output raising this
# exception. We ignore this branch as there is no common ref between us.
continue
if common_refs:
b = sorted(common_refs.items(), key=operator.itemgetter(1), reverse=True)[0][0]
return target_id, b, 'fuzzy'
# 5. last-resort value
return target_repo_id, 'master', 'default'
def path(self, *l):
self.ensure_one()
root = self.env['runbot.repo'].root()
return os.path.join(root, 'build', self.dest, *l)
def server(self, *l):
self.ensure_one()
if os.path.exists(self.path('odoo')):
return self.path('odoo', *l)
return self.path('openerp', *l)
def filter_modules(self, modules, available_modules, explicit_modules):
blacklist_modules = set(['auth_ldap', 'document_ftp', 'base_gengo',
'website_gengo', 'website_instantclick',
'pos_cache', 'pos_blackbox_be'])
mod_filter = lambda m: (
m in available_modules and
(m in explicit_modules or (not m.startswith(('hw_', 'theme_', 'l10n_'))
and m not in blacklist_modules))
)
return uniq_list(filter(mod_filter, modules))
def checkout(self):
for build in self:
# starts from scratch
if os.path.isdir(build.path()):
shutil.rmtree(build.path())
# runbot log path
mkdirs([build.path("logs"), build.server('addons')])
# checkout branch
build.repo_id.git_export(build.name, build.path())
has_server = os.path.isfile(build.server('__init__.py'))
server_match = 'builtin'
# build complete set of modules to install
modules_to_move = []
modules_to_test = ((build.branch_id.modules or '') + ',' +
(build.repo_id.modules or ''))
modules_to_test = list(filter(None, modules_to_test.split(',')))
explicit_modules = set(modules_to_test)
_logger.debug("manual modules_to_test for build %s: %s", build.dest, modules_to_test)
if not has_server:
if build.repo_id.modules_auto == 'repo':
modules_to_test += [
os.path.basename(os.path.dirname(a))
for a in glob.glob(build.path('*/__manifest__.py'))
]
_logger.debug("local modules_to_test for build %s: %s", build.dest, modules_to_test)
for extra_repo in build.repo_id.dependency_ids:
repo_id, closest_name, server_match = build._get_closest_branch_name(extra_repo.id)
repo = self.env['runbot.repo'].browse(repo_id)
_logger.debug('branch %s of %s: %s match branch %s of %s',
build.branch_id.name, build.repo_id.name,
server_match, closest_name, repo.name)
build._log(
'Building environment',
'%s match branch %s of %s' % (server_match, closest_name, repo.name)
)
repo.git_export(closest_name, build.path())
# Finally mark all addons to move to openerp/addons
modules_to_move += [
os.path.dirname(module)
for module in glob.glob(build.path('*/__manifest__.py'))
]
# move all addons to server addons path
for module in uniq_list(glob.glob(build.path('addons/*')) + modules_to_move):
basename = os.path.basename(module)
if os.path.exists(build.server('addons', basename)):
build._log(
'Building environment',
'You have duplicate modules in your branches "%s"' % basename
)
shutil.rmtree(build.server('addons', basename))
shutil.move(module, build.server('addons'))
available_modules = [
os.path.basename(os.path.dirname(a))
for a in glob.glob(build.server('addons/*/__manifest__.py'))
]
if build.repo_id.modules_auto == 'all' or (build.repo_id.modules_auto != 'none' and has_server):
modules_to_test += available_modules
modules_to_test = self.filter_modules(modules_to_test, set(available_modules), explicit_modules)
_logger.debug("modules_to_test for build %s: %s", build.dest, modules_to_test)
build.write({
'server_match': server_match,
'modules': ','.join(modules_to_test)
})
def _local_pg_dropdb(self, dbname):
with local_pgadmin_cursor() as local_cr:
local_cr.execute('DROP DATABASE IF EXISTS "%s"' % dbname)
# cleanup filestore
datadir = appdirs.user_data_dir()
paths = [os.path.join(datadir, pn, 'filestore', dbname) for pn in 'OpenERP Odoo'.split()]
run(['rm', '-rf'] + paths)
def _local_pg_createdb(self, dbname):
self._local_pg_dropdb(dbname)
_logger.debug("createdb %s", dbname)
with local_pgadmin_cursor() as local_cr:
local_cr.execute("""CREATE DATABASE "%s" TEMPLATE template0 LC_COLLATE 'C' ENCODING 'unicode'""" % dbname)
def cmd(self):
"""Return a list describing the command to start the build"""
self.ensure_one()
build = self
# Server
server_path = build.path("odoo-bin")
# for 10.0
if not os.path.isfile(server_path):
server_path = build.path("odoo.py")
# commandline
cmd = [
sys.executable,
server_path,
"--addons=%s" % build.server('addons'),
]
# options
if grep(build.server("tools/config.py"), "no-xmlrpcs"):
cmd.append("--no-xmlrpcs")
if grep(build.server("tools/config.py"), "no-netrpc"):
cmd.append("--no-netrpc")
if grep(build.server("tools/config.py"), "log-db"):
logdb = self.env.cr.dbname
if config['db_host'] and grep(build.server('sql_db.py'), 'allow_uri'):
logdb = 'postgres://{cfg[db_user]}:{cfg[db_password]}@{cfg[db_host]}/{db}'.format(cfg=config, db=logdb)
cmd += ["--log-db=%s" % logdb]
if grep(build.server("tools/config.py"), "data-dir"):
datadir = build.path('datadir')
if not os.path.exists(datadir):
os.mkdir(datadir)
cmd += ["--data-dir", datadir]
return cmd, build.modules
def spawn(self, cmd, lock_path, log_path, cpu_limit=None, shell=False):
def preexec_fn():
os.setsid()
if cpu_limit:
# set soft cpulimit
soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
r = resource.getrusage(resource.RUSAGE_SELF)
cpu_time = r.ru_utime + r.ru_stime
resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + cpu_limit, hard))
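                # the soft limit is offset by any CPU time already consumed,
                # so the job gets roughly cpu_limit CPU seconds before SIGXCPU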
# close parent files
os.closerange(3, os.sysconf("SC_OPEN_MAX"))
lock(lock_path)
out = open(log_path, "w")
_logger.info("spawn: %s stdout: %s", ' '.join(cmd), log_path)
p = subprocess.Popen(cmd, stdout=out, stderr=out, preexec_fn=preexec_fn, shell=shell)
return p.pid
def github_status(self):
"""Notify github of failed/successful builds"""
runbot_domain = self.env['runbot.repo'].domain()
for build in self:
desc = "runbot build %s" % (build.dest,)
if build.state == 'testing':
state = 'pending'
elif build.state in ('running', 'done'):
state = 'error'
if build.result == 'ok':
state = 'success'
if build.result == 'ko':
state = 'failure'
desc += " (runtime %ss)" % (build.job_time,)
else:
continue
status = {
"state": state,
"target_url": "http://%s/runbot/build/%s" % (runbot_domain, build.id),
"description": desc,
"context": "ci/runbot"
}
_logger.debug("github updating status %s to %s", build.name, state)
build.repo_id.github('/repos/:owner/:repo/statuses/%s' % build.name, status, ignore_errors=True)
def job_00_init(self, build, lock_path, log_path):
build._log('init', 'Init build environment')
# notify pending build - avoid confusing users by saying nothing
build.github_status()
build.checkout()
return -2
def job_10_test_base(self, build, lock_path, log_path):
build._log('test_base', 'Start test base module')
# run base test
self._local_pg_createdb("%s-base" % build.dest)
cmd, mods = build.cmd()
if grep(build.server("tools/config.py"), "test-enable"):
cmd.append("--test-enable")
cmd += ['-d', '%s-base' % build.dest, '-i', 'base', '--no-http',
'--stop-after-init', '--log-level=test', '--max-cron-threads=0']
return self.spawn(cmd, lock_path, log_path, cpu_limit=300)
def job_20_test_all(self, build, lock_path, log_path):
build._log('test_all', 'Start test all modules')
self._local_pg_createdb("%s-all" % build.dest)
cmd, mods = build.cmd()
if grep(build.server("tools/config.py"), "test-enable"):
cmd.append("--test-enable")
cmd += ['-d', '%s-all' % build.dest, '-i', mods, '--no-http',
'--stop-after-init', '--log-level=test', '--max-cron-threads=0']
        # reset job_start so that job_20's job_time is measured accurately
build.write({'job_start': fields.Datetime.now()})
return self.spawn(cmd, lock_path, log_path, cpu_limit=2100)
def job_30_run(self, build, lock_path, log_path):
        # adjust job_end so that job_20's job_time is recorded accurately
build._log('run', 'Start running build %s' % build.dest)
log_all = build.path('logs', 'job_20_test_all.txt')
log_time = time.localtime(os.path.getmtime(log_all))
v = {
'job_end': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT, log_time),
}
if grep(log_all, ".modules.loading: Modules loaded."):
if rfind(log_all, _re_error):
v['result'] = "ko"
elif rfind(log_all, _re_warning):
v['result'] = "warn"
elif not grep(build.server("test/common.py"), "post_install") or grep(log_all, "Initiating shutdown."):
v['result'] = "ok"
else:
v['result'] = "ko"
build.write(v)
build.github_status()
# run server
cmd, mods = build.cmd()
if os.path.exists(build.server('addons/im_livechat')):
cmd += ["--workers", "2"]
cmd += ["--http-port", "%d" % build.port]
cmd += ["--longpolling-port", "%d" % (build.port + 1)]
cmd += ["--max-cron-threads", "1"]
else:
            # not sure, but this avoids an old server checking other dbs
cmd += ["--max-cron-threads", "0"]
cmd += ['-d', "%s-all" % build.dest]
if grep(build.server("tools/config.py"), "db-filter"):
if build.repo_id.nginx:
cmd += ['--db-filter', '%d.*$']
else:
cmd += ['--db-filter', '%s.*$' % build.dest]
return self.spawn(cmd, lock_path, log_path, cpu_limit=None)
def force(self):
"""Force a rebuild"""
self.ensure_one()
build = self
domain = [('state', '=', 'pending')]
sequence = self.search(domain, order='id', limit=1)
if not sequence:
sequence = self.search([], order='id desc', limit=1)
# Force it now
if build.state == 'done' and build.result == 'skipped':
values = {'state': 'pending', 'sequence': sequence, 'result': ''}
build.sudo().write(values)
# or duplicate it
elif build.state in ['running', 'done', 'duplicate']:
new_build = {
'sequence': sequence,
'branch_id': build.branch_id.id,
'name': build.name,
'author': build.author,
'author_email': build.author_email,
'committer': build.committer,
'committer_email': build.committer_email,
'subject': build.subject,
'modules': build.modules,
}
self.sudo().create(new_build)
return build.repo_id.id
def schedule(self):
jobs = self.list_jobs()
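        # jobs is the sorted list of the job_* methods defined on this model:
        # job_00_init, job_10_test_base, job_20_test_all, job_30_run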
icp = self.env['ir.config_parameter']
# For retro-compatibility, keep this parameter in seconds
default_timeout = int(icp.get_param('runbot.timeout', default=1800)) / 60
for build in self:
if build.state == 'pending':
# allocate port and schedule first job
port = self.find_port()
values = {
'host': fqdn(),
'port': port,
'state': 'testing',
'job': jobs[0],
'job_start': fields.Datetime.now(),
'job_end': False,
}
build.write(values)
self._cr.commit()
else:
# check if current job is finished
lock_path = build.path('logs', '%s.lock' % build.job)
if locked(lock_path):
# kill if overpassed
timeout = (build.branch_id.job_timeout or default_timeout) * 60
if build.job != jobs[-1] and build.job_time > timeout:
                    build.logger('%s time exceeded (%ss)', build.job, build.job_time)
build.write({'job_end': fields.Datetime.now()})
build.kill(result='killed')
continue
build.logger('%s finished', build.job)
# schedule
v = {}
# testing -> running
if build.job == jobs[-2]:
v['state'] = 'running'
v['job'] = jobs[-1]
                v['job_end'] = fields.Datetime.now()
# running -> done
elif build.job == jobs[-1]:
v['state'] = 'done'
v['job'] = ''
# testing
else:
v['job'] = jobs[jobs.index(build.job) + 1]
build.write(v)
build.refresh()
# run job
pid = None
if build.state != 'done':
build.logger('running %s', build.job)
job_method = getattr(self, build.job)
mkdirs([build.path('logs')])
lock_path = build.path('logs', '%s.lock' % build.job)
log_path = build.path('logs', '%s.txt' % build.job)
pid = job_method(build, lock_path, log_path)
build.write({'pid': pid})
            # needed to prevent losing pids if multiple jobs are started and one of them raises an exception
self._cr.commit()
if pid == -2:
# no process to wait, directly call next job
# FIXME find a better way that this recursive call
build.schedule()
# cleanup only needed if it was not killed
if build.state == 'done':
build._local_cleanup()
def skip(self):
self.write({'state': 'done', 'result': 'skipped'})
to_unduplicate = self.search([('id', 'in', self.ids), ('duplicate_id', '!=', False)])
for b in to_unduplicate:
b.force()
def _local_cleanup(self):
for build in self:
# Cleanup the *local* cluster
with local_pgadmin_cursor() as local_cr:
local_cr.execute("""
SELECT datname
FROM pg_database
WHERE pg_get_userbyid(datdba) = current_user
AND datname LIKE %s
""", [build.dest + '%'])
to_delete = local_cr.fetchall()
for db, in to_delete:
self._local_pg_dropdb(db)
# cleanup: find any build older than 7 days.
root = self.env['runbot.repo'].root()
build_dir = os.path.join(root, 'build')
builds = os.listdir(build_dir)
self._cr.execute("""
SELECT dest
FROM runbot_build
WHERE dest IN %s
AND (state != 'done' OR job_end > (now() - interval '7 days'))
""", [tuple(builds)])
actives = set(b[0] for b in self._cr.fetchall())
for b in builds:
path = os.path.join(build_dir, b)
if b not in actives and os.path.isdir(path):
shutil.rmtree(path)
def kill(self, result=None):
for build in self:
build._log('kill', 'Kill build %s' % build.dest)
build.logger('killing %s', build.pid)
try:
os.killpg(build.pid, signal.SIGKILL)
except OSError:
pass
v = {'state': 'done', 'job': False}
if result:
v['result'] = result
build.write(v)
self._cr.commit()
build.github_status()
build._local_cleanup()
def reap(self):
while True:
try:
pid, status, rusage = os.wait3(os.WNOHANG)
except OSError:
break
if pid == 0:
break
_logger.debug('reaping: pid: %s status: %s', pid, status)
def _log(self, func, message):
self.ensure_one()
_logger.debug("Build %s %s %s", self.id, func, message)
self.env['ir.logging'].create({
'build_id': self.id,
'level': 'INFO',
'type': 'runbot',
'name': 'odoo.runbot',
'message': message,
'path': 'runbot',
'func': func,
'line': '0',
})
|
JZ10UJS/extra-addons
|
runbot/models/runbot_build.py
|
runbot_build.py
|
py
| 27,507 |
python
|
en
|
code
| 15 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "odoo.models.Model",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Many2one",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Many2one",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Integer",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Datetime",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Text",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Integer",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Integer",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Char",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Datetime",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Datetime",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Integer",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Integer",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Many2one",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Selection",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "tools.dashes",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "odoo.api.depends",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "odoo.api",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "tools.dt2time",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "tools.dt2time",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "tools.dt2time",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "odoo.api.depends",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "odoo.api",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "odoo.api.model",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "odoo.fields",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "odoo.fields",
"line_number": 171,
"usage_type": "argument"
},
{
"api_name": "odoo.fields",
"line_number": 182,
"usage_type": "argument"
},
{
"api_name": "odoo.fields",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "operator.itemgetter",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "tools.uniq_list",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "tools.mkdirs",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 265,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 279,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 298,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "tools.uniq_list",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 314,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "tools.local_pgadmin_cursor",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "odoo.tools.appdirs.user_data_dir",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "odoo.tools.appdirs",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "tools.run",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "tools.local_pgadmin_cursor",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 348,
"usage_type": "attribute"
},
{
"api_name": "sys.executable",
"line_number": 353,
"usage_type": "attribute"
},
{
"api_name": "tools.grep",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "tools.grep",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "tools.grep",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "odoo.tools.config",
"line_number": 364,
"usage_type": "name"
},
{
"api_name": "tools.grep",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "odoo.tools.config",
"line_number": 365,
"usage_type": "name"
},
{
"api_name": "tools.grep",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "os.setsid",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "resource.getrlimit",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "resource.RLIMIT_CPU",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "resource.getrusage",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "resource.RUSAGE_SELF",
"line_number": 382,
"usage_type": "attribute"
},
{
"api_name": "resource.setrlimit",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "resource.RLIMIT_CPU",
"line_number": 384,
"usage_type": "attribute"
},
{
"api_name": "os.closerange",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "os.sysconf",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "tools.lock",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "tools.grep",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "tools.grep",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Datetime.now",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Datetime",
"line_number": 446,
"usage_type": "attribute"
},
{
"api_name": "odoo.fields",
"line_number": 446,
"usage_type": "name"
},
{
"api_name": "time.localtime",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "os.path.getmtime",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 453,
"usage_type": "attribute"
},
{
"api_name": "time.strftime",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "odoo.tools.DEFAULT_SERVER_DATETIME_FORMAT",
"line_number": 455,
"usage_type": "argument"
},
{
"api_name": "tools.grep",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "tools.rfind",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "tools.rfind",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "tools.grep",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 471,
"usage_type": "attribute"
},
{
"api_name": "tools.grep",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "tools.fqdn",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Datetime.now",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Datetime",
"line_number": 535,
"usage_type": "attribute"
},
{
"api_name": "odoo.fields",
"line_number": 535,
"usage_type": "name"
},
{
"api_name": "tools.locked",
"line_number": 543,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Datetime.now",
"line_number": 548,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Datetime",
"line_number": 548,
"usage_type": "attribute"
},
{
"api_name": "odoo.fields",
"line_number": 548,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Datetime.now",
"line_number": 558,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Datetime",
"line_number": 558,
"usage_type": "attribute"
},
{
"api_name": "odoo.fields",
"line_number": 558,
"usage_type": "name"
},
{
"api_name": "tools.mkdirs",
"line_number": 574,
"usage_type": "call"
},
{
"api_name": "tools.local_pgadmin_cursor",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 613,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 613,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 614,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 624,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 625,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 625,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 626,
"usage_type": "call"
},
{
"api_name": "os.killpg",
"line_number": 633,
"usage_type": "call"
},
{
"api_name": "signal.SIGKILL",
"line_number": 633,
"usage_type": "attribute"
},
{
"api_name": "os.wait3",
"line_number": 647,
"usage_type": "call"
},
{
"api_name": "os.WNOHANG",
"line_number": 647,
"usage_type": "attribute"
}
] |
13914723162
|
import sys
import oneflow as flow
import oneflow.typing as tp
import argparse
import numpy as np
import os
import shutil
import json
from typing import Tuple
from textcnn import TextCNN
sys.path.append("../..")
from text_classification.utils import pad_sequences, load_imdb_data
parser = argparse.ArgumentParser()
parser.add_argument('--ksize_list', type=str, default='2,3,4,5')
parser.add_argument('--n_filters', type=int, default=100)
parser.add_argument('--emb_dim', type=int, default=100)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--sequence_length', type=int, default=150)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--model_load_dir', type=str, default='')
parser.add_argument('--model_save_every_n_iter', type=int, default=1000)
parser.add_argument('--n_steps', type=int, default=10000)
parser.add_argument('--n_epochs', type=int, default=15)
parser.add_argument('--model_save_dir', type=str, default='./best_model')
args = parser.parse_args()
assert ',' in args.ksize_list
args.ksize_list = [int(n) for n in args.ksize_list.split(',')]
args.emb_num = 50000
args.n_classes = 2
model = TextCNN(
args.emb_num, args.emb_dim,
ksize_list=args.ksize_list,
n_filters_list=[args.n_filters] * len(args.ksize_list),
n_classes=args.n_classes, dropout=args.dropout)
def get_train_config():
config = flow.function_config()
config.default_data_type(flow.float)
return config
def get_eval_config():
config = flow.function_config()
config.default_data_type(flow.float)
return config
@flow.global_function('train', get_train_config())
def train_job(text: tp.Numpy.Placeholder((args.batch_size, args.sequence_length), dtype=flow.int32),
label: tp.Numpy.Placeholder((args.batch_size,), dtype=flow.int32)
) -> tp.Numpy:
with flow.scope.placement("gpu", "0:0"):
logits = model.get_logits(text, is_train=True)
loss = flow.nn.sparse_softmax_cross_entropy_with_logits(label, logits, name="softmax_loss")
lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [args.lr])
flow.optimizer.Adam(lr_scheduler).minimize(loss)
return loss
@flow.global_function('predict', get_eval_config())
def eval_job(text: tp.Numpy.Placeholder((args.batch_size, args.sequence_length), dtype=flow.int32),
label: tp.Numpy.Placeholder((args.batch_size,), dtype=flow.int32)
) -> Tuple[tp.Numpy, tp.Numpy]:
with flow.scope.placement("gpu", "0:0"):
logits = model.get_logits(text, is_train=False)
loss = flow.nn.sparse_softmax_cross_entropy_with_logits(label, logits, name="softmax_loss")
return label, logits
def shuffle_batch(data, label, batch_size):
permu = np.random.permutation(len(data))
data, label = data[permu], label[permu]
batch_n = len(data) // batch_size
x_batch = np.array([data[i * batch_size:i * batch_size + batch_size] for i in range(batch_n)], dtype=np.int32)
y_batch = np.array([label[i * batch_size:i * batch_size + batch_size] for i in range(batch_n)], dtype=np.int32)
return x_batch, y_batch
def acc(labels, logits, g):
predictions = np.argmax(logits, 1)
right_count = np.sum(predictions == labels)
g["total"] += labels.shape[0]
g["correct"] += right_count
def train(checkpoint):
path = '../imdb'
(train_data, train_labels), (test_data, test_labels) = load_imdb_data(path)
with open(os.path.join(path, 'word_index.json')) as f:
word_index = json.load(f)
word_index = {k: (v + 2) for k, v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<UNK>"] = 1
train_data = pad_sequences(train_data, value=word_index["<PAD>"], padding='post', maxlen=args.sequence_length)
test_data = pad_sequences(test_data, value=word_index["<PAD>"], padding='post', maxlen=args.sequence_length)
best_accuracy = 0.0
best_epoch = 0
for epoch in range(1, args.n_epochs + 1):
print("[Epoch:{}]".format(epoch))
        data, label = shuffle_batch(train_data, train_labels, args.batch_size)
for i, (texts, labels) in enumerate(zip(data, label)):
loss = train_job(texts, labels).mean()
if i % 20 == 0:
print(loss)
        data, label = shuffle_batch(test_data, test_labels, args.batch_size)
g = {"correct": 0, "total": 0}
for i, (texts, labels) in enumerate(zip(data, label)):
labels, logits = eval_job(texts, labels)
acc(labels, logits, g)
accuracy = g["correct"] * 100 / g["total"]
print("[Epoch:{0:d} ] accuracy: {1:.1f}%".format(epoch, accuracy))
if accuracy > best_accuracy:
best_accuracy = accuracy
best_epoch = epoch
if not os.path.exists(args.model_save_dir):
os.mkdir(args.model_save_dir)
else:
shutil.rmtree(args.model_save_dir)
assert not os.path.exists(args.model_save_dir)
os.mkdir(args.model_save_dir)
print("Epoch:{} save best model.".format(best_epoch))
checkpoint.save(args.model_save_dir)
print("Epoch:{} get best accuracy:{}".format(best_epoch, best_accuracy))
if __name__ == '__main__':
checkpoint = flow.train.CheckPoint()
checkpoint.init()
train(checkpoint)
|
Oneflow-Inc/oneflow_nlp_model
|
text_classification/textcnn/train_textcnn.py
|
train_textcnn.py
|
py
| 5,411 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "textcnn.TextCNN",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "oneflow.function_config",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "oneflow.float",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "oneflow.function_config",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "oneflow.float",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "oneflow.typing.Numpy.Placeholder",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "oneflow.typing.Numpy",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "oneflow.typing",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "oneflow.int32",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "oneflow.typing.Numpy.Placeholder",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "oneflow.typing.Numpy",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "oneflow.typing",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "oneflow.int32",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "oneflow.scope.placement",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "oneflow.scope",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "oneflow.nn.sparse_softmax_cross_entropy_with_logits",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "oneflow.nn",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "oneflow.optimizer.PiecewiseConstantScheduler",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "oneflow.optimizer",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "oneflow.optimizer.Adam",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "oneflow.optimizer",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "oneflow.global_function",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "oneflow.typing.Numpy",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "oneflow.typing",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "oneflow.typing.Numpy.Placeholder",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "oneflow.typing.Numpy",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "oneflow.typing",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "oneflow.int32",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "oneflow.typing.Numpy.Placeholder",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "oneflow.typing.Numpy",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "oneflow.typing",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "oneflow.int32",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "oneflow.scope.placement",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "oneflow.scope",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "oneflow.nn.sparse_softmax_cross_entropy_with_logits",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "oneflow.nn",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "oneflow.global_function",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "oneflow.typing.Numpy",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "oneflow.typing",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "numpy.random.permutation",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmax",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "text_classification.utils.load_imdb_data",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "text_classification.utils.pad_sequences",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "text_classification.utils.pad_sequences",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "oneflow.train.CheckPoint",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "oneflow.train",
"line_number": 144,
"usage_type": "attribute"
}
] |
36321955212
|
import os
import re
import sys
import glob
import shutil
import pdftotext
def extract_Text_pdf(pdfdir):
print("Starting Text Extraction for pdf files......")
number_of_files = str(len([item for item in os.listdir(pdfdir) if os.path.isfile(os.path.join(pdfdir, item))]))
print("Processing ("+ number_of_files + ") .pdf files.....")
os.chdir(pdfdir)
file_list2 = []
for filename in glob.glob("*.pdf"):
        #Get the filename without the extension for naming later
base=os.path.basename(filename)
filenameNoExt = os.path.splitext(base)[0]
#Create a list of the text files
file_list2.append("pdf_"+filenameNoExt+".txt")
with open(filename, "rb") as f:
pdf = pdftotext.PDF(f)
filecontents = re.sub(' +', ' ', " ".join(pdf).replace("\n"," ").strip())
#Remove Non ASCII characters
filecontents2 = re.sub(r'[^\x00-\x7f]',r'', filecontents)
# content_list = list(filter(None, content_list))
with open ("pdf_"+filenameNoExt+".txt","a")as fp1:
fp1.write(filecontents2)
fp1.close()
print("Text extraction completed for ("+ number_of_files + ") .pdf files ********************")
pdf_files = 'to_process/'
extract_Text_pdf(pdf_files)
|
mstatt/Udemy_HighSpeedDataAnalysis
|
3_PDF_Text_Extraction/pdf_text_extraction.py
|
pdf_text_extraction.py
|
py
| 1,272 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "os.listdir",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pdftotext.PDF",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 28,
"usage_type": "call"
}
] |
36030628166
|
"""Countdown/Stopwatch functionalities."""
import subprocess
import threading
import time
import traceback
from abc import (
ABC,
abstractmethod,
)
from pathlib import Path
from typing import (
List,
Optional,
Union,
)
from overrides import overrides
import albert as v0
import gi # isort:skip
gi.require_version("Notify", "0.7") # isort:skip
from gi.repository import (
GdkPixbuf,
Notify,
) # isort:skip
__title__ = "Countdown/Stopwatch functionalities"
__version__ = "0.4.0"
__triggers__ = "clock "
__authors__ = "Nikos Koukis"
__homepage__ = (
"https://github.com/bergercookie/awesome-albert-plugins/blob/master/plugins/clock"
)
countdown_path = str(Path(__file__).parent / "countdown.png")
stopwatch_path = str(Path(__file__).parent / "stopwatch.png")
sound_path = Path(__file__).parent.absolute() / "bing.wav"
cache_path = Path(v0.cacheLocation()) / "clock"
config_path = Path(v0.configLocation()) / "clock"
data_path = Path(v0.dataLocation()) / "clock"
dev_mode = True
# plugin main functions -----------------------------------------------------------------------
def play_sound(num):
for x in range(num):
t = threading.Timer(0.5 * x, lambda: subprocess.Popen(["cvlc", sound_path,]),)
t.start()
def notify(
app_name: str, msg: str, image=None,
):
Notify.init(app_name)
n = Notify.Notification.new(app_name, msg, image)
n.show()
def format_time(t: float):
"""Return the string representation of t. t must be in *seconds*"""
if t >= 60:
return f"{round(t / 60.0, 2)} mins"
else:
return f"{round(t, 2)} secs"
def play_icon(started) -> str:
return "▶️" if started else "⏸️"
class Watch(ABC):
def __init__(self, name):
self._name = name if name is not None else ""
self._to_remove = False
def name(self,) -> Optional[str]:
return self._name
@abstractmethod
def start(self):
pass
    def started(self) -> bool:
        return self._started
@abstractmethod
def pause(self):
pass
@abstractmethod
def notify(self):
pass
    def to_remove(self,) -> bool:
        # Report the flag set by e.g. Countdown.time_elapsed so that finished
        # watches are cleaned up in handleQuery.
        return self._to_remove
class Stopwatch(Watch):
def __init__(self, name=None):
super(Stopwatch, self).__init__(name=name)
self.total_time = 0
self.latest_start = 0
self._started = False
self.latest_stop_time = 0
@overrides
def start(self):
self.latest_start = time.time()
self._started = True
self.notify(msg=f"Stopwatch [{self.name()}] starting")
@overrides
def pause(self):
stop_time = time.time()
self.total_time += stop_time - self.latest_start
self._started = False
self.notify(
msg=f"Stopwatch [{self.name()}] paused, total: {format_time(self.total_time)}"
)
self.latest_stop_time = stop_time
@overrides
def notify(self, msg):
notify(
app_name="Stopwatch", msg=msg, image=stopwatch_path,
)
@classmethod
def icon(cls):
return stopwatch_path
def destroy(self):
pass
def __str__(self):
# current interval
if self.started():
latest = time.time()
else:
latest = self.latest_stop_time
current_interval = latest - self.latest_start
total = self.total_time + current_interval
s = get_as_subtext_field(play_icon(self._started))
s += get_as_subtext_field(self.name())
s += get_as_subtext_field(format_time(total), "Total",)
s += get_as_subtext_field(format_time(current_interval), "Current Interval",)[:-2]
return s
class Countdown(Watch):
def __init__(
self, name: str, count_from: float,
):
super(Countdown, self).__init__(name=name)
self.latest_start = 0
self.remaining_time = count_from
self._started = False
self.timer = None
@overrides
def start(self):
self._started = True
self.latest_start = time.time()
self.timer = threading.Timer(self.remaining_time, self.time_elapsed,)
self.timer.start()
self.notify(
msg=f"Countdown [{self.name()}] starting, remaining: {format_time(self.remaining_time)}"
)
@overrides
def pause(self):
self._started = False
self.remaining_time -= time.time() - self.latest_start
if self.timer:
self.timer.cancel()
self.notify(
msg=f"Countdown [{self.name()}] paused, remaining: {format_time(self.remaining_time)}"
)
def time_elapsed(self):
self.notify(msg=f"Countdown [{self.name()}] finished")
play_sound(1)
self._to_remove = True
@classmethod
def icon(cls):
return countdown_path
def destroy(self):
self.timer.cancel()
self.notify(msg=f"Cancelling [{self.name()}]")
@overrides
def notify(self, msg):
notify(
app_name="Countdown", msg=msg, image=countdown_path,
)
def __str__(self):
s = get_as_subtext_field(play_icon(self._started))
s += get_as_subtext_field(self.name())
# compute remaining time
remaining_time = self.remaining_time
if self.started():
remaining_time -= time.time() - self.latest_start
s += f"Remaining: {format_time(remaining_time)}"
return s
countdowns: List[Countdown] = []
stopwatches: List[Stopwatch] = []
def all_watches() -> List[Union[Countdown, Stopwatch]]:
return [
*countdowns,
*stopwatches,
]
def create_stopwatch(name, *query_parts):
stopwatches.append(Stopwatch(name=name))
stopwatches[-1].start()
def create_countdown(name, *query_parts):
t = float(query_parts[0].strip()) * 60
countdowns.append(Countdown(name=name, count_from=t,))
countdowns[-1].start()
def delete_item(item: Union[Stopwatch, Countdown]):
item.destroy()
# TODO: could be neater..
if isinstance(item, Stopwatch):
stopwatches.remove(item)
else:
countdowns.remove(item)
def initialize():
"""Called when the extension is loaded (ticked in the settings) - blocking."""
# create plugin locations
for p in (
cache_path,
config_path,
data_path,
):
p.mkdir(
parents=False, exist_ok=True,
)
def finalize():
pass
def handleQuery(query,) -> list:
"""Hook that is called by albert with *every new keypress*.""" # noqa
results = []
if query.isTriggered:
try:
query.disableSort()
results_setup = setup(query)
if results_setup:
return results_setup
query_parts = query.string.strip().split()
name = None
if query_parts:
name = query_parts.pop(0)
subtext = f'Name: {name if name else "Not given"}'
results.extend(
[
v0.Item(
id=__title__,
icon=countdown_path,
text="Create countdown",
subtext=f'{subtext}{" - <u>Please provide a duration</u>" if not query_parts else ""}',
completion=__triggers__,
actions=[
v0.FuncAction(
"Create countdown",
lambda name=name, query_parts=query_parts: create_countdown(
name, *query_parts,
),
)
],
),
v0.Item(
id=__title__,
icon=stopwatch_path,
text="Create stopwatch",
subtext=subtext,
completion=__triggers__,
actions=[
v0.FuncAction(
"Create stopwatch",
lambda name=name, query_parts=query_parts: create_stopwatch(
name, *query_parts,
),
)
],
),
]
)
# cleanup watches that are done
            for li in [
                countdowns,
                stopwatches,
            ]:
                # rebuild in place: calling remove() while iterating skips elements
                li[:] = [watch for watch in li if not watch.to_remove()]
results.extend([get_as_item(item) for item in all_watches()])
except Exception: # user to report error
if dev_mode: # let exceptions fly!
print(traceback.format_exc())
raise
results.insert(
0,
v0.Item(
id=__title__,
icon=countdown_path,
text="Something went wrong! Press [ENTER] to copy error and report it",
actions=[
v0.ClipAction(
f"Copy error - report it to {__homepage__[8:]}",
f"{traceback.format_exc()}",
)
],
),
)
return results
# supplementary functions ---------------------------------------------------------------------
def get_as_item(item: Union[Countdown, Stopwatch]):
"""Return an item - ready to be appended to the items list and be rendered by Albert."""
actions = [v0.FuncAction("Remove", lambda: delete_item(item),)]
if item.started():
actions.append(v0.FuncAction("Pause", lambda: item.pause(),))
else:
actions.append(v0.FuncAction("Resume", lambda: item.start(),))
return v0.Item(
id=__title__,
icon=countdown_path if isinstance(item, Countdown) else stopwatch_path,
text=str(item),
subtext="",
completion=__triggers__,
actions=actions,
)
def get_as_subtext_field(field, field_title=None) -> str:
"""Get a certain variable as part of the subtext, along with a title for that variable."""
s = ""
if field:
s = f"{field} | "
else:
return ""
if field_title:
s = f"{field_title}: " + s
return s
def save_data(data: str, data_name: str):
"""Save a piece of data in the configuration directory."""
with open(config_path / data_name, "w",) as f:
f.write(data)
def load_data(data_name,) -> str:
"""Load a piece of data from the configuration directory."""
with open(config_path / data_name, "r",) as f:
data = f.readline().strip().split()[0]
return data
def setup(query):
"""Setup is successful if an empty list is returned.
Use this function if you need the user to provide you data
"""
results = []
return results
|
ppablocruzcobas/Dotfiles
|
albert/clock/__init__.py
|
__init__.py
|
py
| 11,096 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "gi.require_version",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "albert.cacheLocation",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "albert.configLocation",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "albert.dataLocation",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "threading.Timer",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "gi.repository.Notify.init",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "gi.repository.Notify",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "gi.repository.Notify.Notification.new",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "gi.repository.Notify.Notification",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Notify",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "abc.ABC",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "overrides.overrides",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "overrides.overrides",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "overrides.overrides",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "threading.Timer",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "overrides.overrides",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "overrides.overrides",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "overrides.overrides",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "albert.Item",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "albert.FuncAction",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "albert.Item",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "albert.FuncAction",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "traceback.format_exc",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "albert.Item",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "albert.ClipAction",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "traceback.format_exc",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "albert.FuncAction",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "albert.FuncAction",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "albert.FuncAction",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "albert.Item",
"line_number": 367,
"usage_type": "call"
}
] |
32584103329
|
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
from app import app
from apps import theme_explorer as te, text
import util
"""
=====================================================================
Helper functions and components
"""
df = px.data.gapminder()
code = util.get_code_file("dash_bootstrap_templates_app.py")
copy_code_div = util.get_copy_code_div(code, id="copy_template_code")
# make control panel
use_templates = dbc.RadioItems(
options=[
{"label": "Use figure templates from dash-bootstrap-templates", "value": 1},
{"label": "Use Plotly default figure template", "value": 2},
],
value=1,
id="use_figure_template",
)
control_panel_text = dcc.Markdown(
text.dash_bootstrap_templates_text, className="border mb-5 p-4"
)
# needed because the theme dropdown also updates "css" on Theme Explorer page but not here
dummy_output = html.Div(id="css", className='d-none')
control_panel = [control_panel_text, te.boostrap_card, use_templates, dummy_output]
carousel = dbc.Carousel(
ride="carousel",
items=[
{
"key": "1",
"src": "https://user-images.githubusercontent.com/72614349/129459807-30c22ffe-7a8c-44b9-9555-6cfd50ec355b.png",
},
{
"key": "2",
"src": "https://user-images.githubusercontent.com/72614349/129459808-40032148-82e1-47ce-a49a-05e598c69400.png",
},
],
)
carousel_text = dcc.Markdown(text.dash_bootstrap_templates_app_text)
"""
===============================================================================
Layout
"""
layout = dbc.Container(
[
util.header,
dbc.Row(
[
dbc.Col(control_panel, lg=4, sm=12),
dbc.Col(
html.Div(
id="db_templates_sample_app", className="mx-1 mb-4 shadow p-4",
),
lg=8,
sm=12,
),
],
),
dbc.Row(
[
dbc.Col([carousel_text, carousel], lg=4, sm=12),
dbc.Col(html.Div(copy_code_div,), lg=8, sm=12,),
],
),
],
fluid=True,
id="bootstrap_templates",
)
"""
=====================================================================
Display Sample App based on theme selected
"""
@app.callback(
Output("db_templates_sample_app", "children"),
Input("themes", "value"),
Input("use_figure_template", "value"),
)
def update_graphs(theme, use_template):
template = util.url_dbc_themes[theme].lower() if use_template == 1 else {}
heading_txt = (
"App with dash-bootstrap-templates"
if use_template == 1
else "App with Plotly default figure template"
)
heading = html.H3(heading_txt, className="bg-primary text-white p-2")
dff = df[df.year.between(1952, 1982)]
dff = dff[dff.continent.isin(df.continent.unique()[1:])]
line_fig = px.line(
dff,
x="year",
y="gdpPercap",
color="continent",
line_group="country",
template=template,
)
dff = dff[dff.year == 1982]
scatter_fig = px.scatter(
dff,
x="lifeExp",
y="gdpPercap",
size="pop",
color="pop",
size_max=60,
template=template,
).update_traces(marker_opacity=0.8)
    avg_lifeExp = (dff["lifeExp"] * dff["pop"]).sum() / dff["pop"].sum()  # population-weighted mean life expectancy
map_fig = px.choropleth(
dff,
locations="iso_alpha",
color="lifeExp",
title="%.0f World Average Life Expectancy was %.1f years" % (1982, avg_lifeExp),
template=template,
)
hist_fig = px.histogram(
dff, x="lifeExp", nbins=10, title="Life Expectancy", template=template
)
graph_height = 300
graphs = html.Div(
[
dbc.Row(
[
dbc.Col(
dcc.Graph(figure=line_fig, style={"height": graph_height}), lg=6
),
dbc.Col(
dcc.Graph(figure=scatter_fig, style={"height": graph_height}),
lg=6,
),
],
className="mt-4",
),
dbc.Row(
[
dbc.Col(
dcc.Graph(figure=hist_fig, style={"height": graph_height}), lg=6
),
dbc.Col(
dcc.Graph(figure=map_fig, style={"height": graph_height}), lg=6
),
],
className="mt-4",
),
]
)
    # These buttons are added to the app just to show the Bootstrap theme colors
buttons = html.Div(
[
dbc.Button("Primary", color="primary", className="mr-1"),
dbc.Button("Secondary", color="secondary", className="mr-1"),
dbc.Button("Success", color="success", className="mr-1"),
dbc.Button("Warning", color="warning", className="mr-1"),
dbc.Button("Danger", color="danger", className="mr-1"),
dbc.Button("Info", color="info", className="mr-1"),
dbc.Button("Light", color="light", className="mr-1"),
dbc.Button("Dark", color="dark", className="mr-1"),
dbc.Button("Link", color="link"),
],
)
return [heading, buttons, graphs]
@app.callback(
Output("bootstrap_templates", "className"), Input("light_dark", "value"),
)
def update_css(value):
return "dbc_light" if value == "Light Themes" else "dbc_dark"
|
thigbee/dashBootstrapThemeExplorer
|
apps/bootstrap_templates.py
|
bootstrap_templates.py
|
py
| 5,729 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "plotly.express.data.gapminder",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "plotly.express.data",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "plotly.express",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "util.get_code_file",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "util.get_copy_code_div",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.RadioItems",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Markdown",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "apps.text.dash_bootstrap_templates_text",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "apps.text",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "dash_html_components.Div",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "apps.theme_explorer.boostrap_card",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "apps.theme_explorer",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "dash_bootstrap_components.Carousel",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Markdown",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "apps.text.dash_bootstrap_templates_app_text",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "apps.text",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "dash_bootstrap_components.Container",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "util.header",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "dash_bootstrap_components.Row",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Col",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Col",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Row",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Col",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Col",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "util.url_dbc_themes",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "dash_html_components.H3",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "plotly.express.line",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "plotly.express.scatter",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "plotly.express.choropleth",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "plotly.express.histogram",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "dash_html_components.Div",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Row",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Col",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Col",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Row",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Col",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Col",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Button",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Button",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Button",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Button",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Button",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Button",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Button",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Button",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.Button",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "app.app.callback",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "app.app",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "app.app.callback",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "app.app",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 196,
"usage_type": "call"
}
] |
8588345616
|
from collections import namedtuple
from datetime import datetime
from time import sleep
from timeit import default_timer as timer
import re
import requests
def _request_matches(r, regexp) -> bool:
"""Check if request has data and that data matches give regular expresssion
Args:
r: HTTP call result from a status provider, must implement raise_for_status() and .text
        regexp: Compiled regular expression to search for in the HTTP request text field.
Returns:
        True if a match is found, False if not, and None if the request has no usable .text property
"""
try:
r.raise_for_status()
text = r.text
return regexp.search(text) is not None
    except Exception:
        # Non-2xx status or no usable .text: report "unknown" as None.
        return None
def check_status(url: str, regexp, status_provider, src='localhost') -> dict:
"""Check status code of a given Url
Args:
url: URL-string of a resource to check with HTTP GET request.
        regexp: Regular expression to check the response against, if any
src: Identifier of a requestor used for reporting and returned as result.src
status_provider: Callable used to get a status of a resource.
Returns:
Object representing a status of the given resource
"""
ts = datetime.now()
start_time = timer()
r = status_provider(url)
end_time = timer()
return {
'timestamp': str(ts),
'src': src,
'target': url,
'time': (end_time - start_time),
'code': r.status_code,
'has_match': _request_matches(r, regexp) if regexp else None
}
class RestStatusPoller:
"""A source of REST-resourse status checks.
    This source issues REST GET requests to a given resource URL
    and yields a dict describing the resource status.
The source is designed to be used as iterable:
for data in source:
process(data)
Keyword Arguments:
url: URL of the resource to check status
        interval: (int or None): time in sec to wait before the next check.
If None is given, the check is performed only once.
regexp (str or None): regular expression to search for in the response body, if any.
            If None is given, no search is performed and the 'has_match' field of the
            status response is set to None.
        provider (callable or None): a resource status provider override.
            If None is given, requests.get is used. Default is None.
"""
def __init__(self, url, interval, regexp, provider=None):
self.url = url
self.interval = interval
self.pattern = re.compile(regexp) if regexp else None
self.__value_provide = provider or (lambda x: requests.get(
x, headers={'content-type': 'application/json'}))
self.__iter_count = 0
def __iter__(self):
return self
def __next__(self):
if self.__iter_count > 0:
if self.interval is not None:
sleep(self.interval)
else:
raise StopIteration()
self.__iter_count += 1
return check_status(self.url, self.pattern, self.__value_provide)
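# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the iterable protocol promised in the class docstring: with
# interval=None the poller performs exactly one check and then stops. The
# URL below is a placeholder assumption for illustration only.
if __name__ == '__main__':
    poller = RestStatusPoller('https://example.com', interval=None, regexp=None)
    for status in poller:
        print(status['code'], status['time'], status['has_match'])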
|
abbyssoul/site_check
|
site_checker/rest_source.py
|
rest_source.py
|
py
| 3,130 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "timeit.default_timer",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 90,
"usage_type": "call"
}
] |
3848748609
|
from setuptools import setup, Extension
condor_module = Extension('condor',
sources=['c/condor.c', 'c/glutils.c'],
libraries=['GLEW', 'glfw'])
setup (name='Condor',
version='0.1',
description='',
ext_modules=[condor_module])
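# Hedged usage note (not part of the original script), assuming the standard
# setuptools workflow: the extension is typically built in place with
#   python setup.py build_ext --inplace
# which requires the GLEW and GLFW development headers and libraries.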
|
enricozb/Condor
|
condor/setup.py
|
setup.py
|
py
| 301 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "setuptools.Extension",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 7,
"usage_type": "call"
}
] |
39763998514
|
import streamlit as st
import os
from PIL import Image
from ultralytics import YOLO
import re
# Load the model
model = YOLO("model.pt")
# Set the path for results
output_dir = 'temp_out_res'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Function to predict images
def predict_image(image_path):
results = model.predict(source=image_path)
input_filename = os.path.basename(results[0].path)
annotated_img = results[0].plot()
pil_image = Image.fromarray(annotated_img[..., ::-1])
pil_image.save(os.path.join(output_dir, input_filename))
total_polis = 0
total_monos = 0
polis_index = 0
monos_index = 1
verbose_output = results[0].verbose()
polis_match = re.search(r'(\d+) poli', verbose_output)
monos_match = re.search(r'(\d+) mono', verbose_output)
if polis_match:
total_polis += int(polis_match.group(1))
if monos_match:
total_monos += int(monos_match.group(1))
if total_polis + total_monos == 0:
polis_percentage = 0
else:
polis_percentage = (total_polis / (total_polis + total_monos)) * 100
return os.path.join(output_dir, input_filename), total_polis, total_monos, polis_percentage
# Main Streamlit function
def main():
st.title("EndoScan: YOLO Subclinical Endometritis Detector")
uploaded_file = st.file_uploader("Choose an image for prediction", type=['jpg', 'jpeg', 'png'])
if uploaded_file is not None:
image_path = os.path.join(output_dir, uploaded_file.name)
with open(image_path, 'wb') as f:
f.write(uploaded_file.getbuffer())
st.image(image_path, caption='Uploaded image.', use_column_width=True)
if st.button("Predict"):
pred_img_path, polis_count, monos_count, polis_perc = predict_image(image_path)
st.image(pred_img_path, caption='Predicted image.', use_column_width=True)
st.write(f"Total count of polymorphonuclear cells: {polis_count}")
st.write(f"Total count of mononuclear cells: {monos_count}")
st.write(f"Percentage of polymorphonuclear cells: {polis_perc:.2f}%")
if __name__ == '__main__':
main()
|
DawidTobolski/YOLO_cell
|
YOLO_cell.py
|
YOLO_cell.py
|
py
| 2,252 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "ultralytics.YOLO",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "streamlit.title",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "streamlit.file_uploader",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "streamlit.image",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "streamlit.button",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "streamlit.image",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 60,
"usage_type": "call"
}
] |
30728277710
|
import fileinput
from typing import Counter  # works at runtime (it aliases collections.Counter), though importing from collections is conventional
ll = [l.strip() for l in fileinput.input()]
numbers = []
for line_nr in range(len(ll)):
l = ll[line_nr]
numbers = [int(x) for x in l.split(',')]
def count_fishes(days):
    # Track how many fish share each internal timer value instead of
    # simulating fish one by one: the population only depends on the
    # histogram of timers.
    dd = Counter(numbers)
    for _ in range(days):
        new_fishes = dd[0]
        # Shift every timer down by one day (timer i+1 becomes timer i).
        for i in range(0, 8):
            dd[i] = dd[i+1]
        dd[6] += new_fishes  # parents reset their timer to 6
        dd[8] = new_fishes   # each parent spawns a fish with timer 8
    return sum([dd[i] for i in dd])
print(count_fishes(80), count_fishes(256))
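# Hedged sanity check (not part of the original solution): in the puzzle's
# model a fish with internal timer 3 spawns exactly once within 4 days, so
# the Counter rotation should report 2 fishes. count_fishes reads the
# module-level `numbers`, which is rebound here just for the check.
numbers = [3]
assert count_fishes(4) == 2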
|
mdaw323/alg
|
adventofcode2021/6.py
|
6.py
|
py
| 498 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fileinput.input",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "typing.Counter",
"line_number": 13,
"usage_type": "call"
}
] |
43599298295
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import os
from time import sleep
import smtplib
url = "https://www.coronatracker.com/pt-br/"
driver = webdriver.Chrome()
driver.get(url)
sleep(5)
save = driver.find_element_by_xpath('//*[@id="__layout"]/div/main/div/div[1]/div[1]/div[1]/div[2]/div[2]/div[1]/span')
death = driver.find_element_by_xpath('//*[@id="__layout"]/div/main/div/div[1]/div[1]/div[1]/div[2]/div[3]/div[1]/span')
cases = driver.find_element_by_xpath('//*[@id="__layout"]/div/main/div/div[1]/div[1]/div[1]/div[2]/div[1]/div[1]/span')
casestext = cases.text
savetext = save.text
deathtext = death.text
print('Casos Confirmados: {}'.format(casestext))
print('Casos Recuperados: {}'.format(savetext))
print('Mortes: {}'.format(deathtext))
msg = '\nCasos Confirmados:\n'+ casestext +'\nCasos salvos:\n'+ savetext + '\nMortos:\n' + deathtext
def sendmail():
smtp = "smtp.gmail.com"
server = smtplib.SMTP_SSL (smtp, 465)
server.login('[email protected]', 'vini2604')
server.sendmail('[email protected]','[email protected]',msg)
server.quit()
print ('EMAIL ENVIADO COM URGÊNCIA')
sendmail()
|
vinihtao/Projeto-Webscrapping
|
CasosCOVID.py
|
CasosCOVID.py
|
py
| 1,266 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "smtplib.SMTP_SSL",
"line_number": 35,
"usage_type": "call"
}
] |
13658425408
|
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
def summarize_qc_resamples(input_df, verbose=False, **resample_kwargs):
time_list = list()
data_list = list()
for time, df in input_df.resample(**resample_kwargs):
        if verbose:
print("Currently working on: {}".format(time))
time_list.append(time)
df_stats = df.qc.describe()
data_list.append(df_stats.values)
    else:
        # for/else: this branch runs once after the loop completes, so the
        # coordinate labels come from the last resampled frame.
        measures = df_stats.index.to_list()
variables = df.columns.to_list()
attrs = resample_kwargs
return xr.DataArray(np.dstack(data_list),
coords = [measures, variables, time_list],
dims = ['measure','variable','time'],
name = "qc_summary",
attrs = attrs)
|
wangsen992/pyqc
|
src/pyqc/tools.py
|
tools.py
|
py
| 855 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "xarray.DataArray",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.dstack",
"line_number": 23,
"usage_type": "call"
}
] |
26287041907
|
import sys
import matplotlib.pyplot as plt
import numpy as np
import os
# this program reads input from a script which has assessed how networks react to a particular combination of gradient and division status
# the script has produced for each network a matrix with 0 (migrate) and 1 (divide), which this program will plot and find the consensus for.
if len(sys.argv) <3:
print ("This is the program 'plot_netanalysis_jan.py'")
print ("Usage: ./plot_netanalysis_jan.py <output_file> <plot individuals?> <input filenames>")
sys.exit(1)
else:
outputfile=sys.argv[1]
indiplot=int(sys.argv[2])
arraystorage=[]
filestorage=[]
init=0
count=0
sizes=None
consensus=None
for filename in sys.argv[3:]:
#print ("{}".format(filename))
divmig = np.loadtxt(filename, dtype='i', delimiter='\t')
#print sizes
if not init:
sizes = np.shape(divmig[1:,1:])
consensus=np.zeros((sizes[0]*sizes[1],),dtype=int)
init=1
outfile=os.path.splitext(filename)[0]
#for if you still need to plot the individuals:
if (indiplot):
fig=plt.figure() #!
fig.set_size_inches(1, 1.*sizes[0]/sizes[1], forward = False) #!
ax = plt.Axes(fig, [0., 0., 1., 1.]) #!
ax.set_axis_off() #!
fig.add_axes(ax) #!
ax.imshow(divmig[1:,1:], cmap='RdYlBu', origin='lower')
divshare=divmig[1:,1:].sum()
migshare=(sizes[0])*(sizes[1])-divshare
migs="%04d" % (migshare,)
#print divs
plt.savefig("div_"+str(migs)+"_"+outfile+".pdf", dpi=sizes[1]) #bbox_inches='tight'
plt.close()
binarystring=divmig[1:,1:].flatten()
consensus=np.add(binarystring, consensus)
#print ("{}".format(consensus))
arraystorage.append(binarystring)
filestorage.append(outfile)
count+=1
#find the consensus sequence
bool_consensus= consensus > count/2
print ("{}".format(bool_consensus))
consensus_sequence=bool_consensus.astype(int)
print ("consensus is {}".format(consensus_sequence))
wfilename="consensussequence_"+outputfile+".dat"
writefile=open(wfilename,"w")
for el in consensus_sequence:
writefile.write(str(el)+" ")
writefile.close()
#display consensus image
imcons=np.reshape(consensus_sequence,sizes)
fig=plt.figure() #!
fig.set_size_inches(1, 1.*sizes[0]/sizes[1], forward = False) #!
ax = plt.Axes(fig, [0., 0., 1., 1.]) #!
ax.set_axis_off() #!
fig.add_axes(ax) #!
ax.imshow(imcons, cmap='RdYlBu', origin='lower')
#outfile=os.path.splitext(outputfile)[0]
plt.savefig("consensus"+"_"+outputfile+".pdf", dpi=sizes[1]) #bbox_inches='tight'
plt.close()
#find for each individual the distance to the consensus sequence
#writefile=open(outputfile, "w")
#fig=plt.figure() #
#hamms=[]
minhamm=999999999
for fi,seq in zip(filestorage, arraystorage):
hamm=np.count_nonzero(seq!=consensus_sequence)
if hamm<minhamm:
minhamm=hamm
minfile=fi
print ("file with individual closest to consensus: {}".format(minfile))
# hamms.append[hamm]
#writefile.write(fi+"\t"+str(hamm)+"\n")
#maxbina=max(hamms)
#hista, bin_edgesa = np.histogram(hamms, bins = range(maxbina))
#plt.plot(bin_edgesa[:-1],hista)
#writefile.close()
|
RenskeVroomans/regulation_evolution
|
scripts/plot_netanalysis_jan.py
|
plot_netanalysis_jan.py
|
py
| 3,203 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.loadtxt",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.Axes",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "numpy.add",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.Axes",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 95,
"usage_type": "call"
}
] |
19504742337
|
from igraph import Graph
from igraph import plot
grafo = Graph(edges = [(0,1),(2,3),(0,2),(0,3)], directed = True)
grafo.vs['label'] =['Fernando', 'Pedro', 'Jose', 'Antonio']
grafo.vs['nota'] = [100, 40, 60, 20]
grafo.es['tipoAmizade'] = ['Amigo', 'Inimigo', 'Amigo', 'Amigo']  # the original listed three labels for four edges; a fourth value ('Amigo', assumed) keeps the attribute length consistent
grafo.es['devendo'] = [1,3,2,5]
grafo.vs['color'] = ['red', 'yellow','orange', 'green']
plot(grafo, bbox =(300,300),
vertex_size = grafo.vs['nota'],
edge_width = grafo.es['devendo'],
vertex_color = grafo.vs['color'],
edge_curved = 0.4,
vertex_shape = 'square')
|
guibarreta1993Average/data_science_udemy
|
05_Grafos/aula34_impressao.py
|
aula34_impressao.py
|
py
| 557 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "igraph.Graph",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "igraph.plot",
"line_number": 12,
"usage_type": "call"
}
] |
74190873788
|
__author__ = "ALEX-CHUN-YU ([email protected])"
from sklearn.datasets import load_wine
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from sklearn.model_selection import validation_curve
from sklearn.model_selection import GridSearchCV
from sklearn_evaluation.plot import grid_search
import numpy as np
import matplotlib.pyplot as plt
from sklearn.externals import joblib
import json
# Random Forest Classifier Alogorithm
class RFC():
# RFC Initialize
def __init__(self, name):
self.model_name = 'model/' + name + '_rfc'
self.image_name = 'image/' + name + '_rfc'
# RFC Parameter
self.n_estimators = 10
self.criterion = 'gini'
self.max_depth = None
self.min_samples_split = 2
self.min_samples_leaf = 1
self.min_weight_fraction_leaf = 0.0
self.max_features = 'auto'
self.max_leaf_nodes = None
self.min_impurity_decrease = 0.0
self.min_impurity_split = None
self.bootstrap = True
self.oob_score = False
self.n_jobs = -1
self.random_state = None
self.verbose = 0
self.warm_start = False
self.class_weight = None
#(Validation Parameter) GridSearchCV, validation_curve
self.cv = 10
        self.criterion_range = ['gini', 'entropy'] # 2 criteria x 3 max_features options = 6 curves
        self.max_features_range = ['sqrt', 'log2', 'auto']
        self.n_estimators_range = [10, 50, 100, 700, 1000]# 5 points on the n_estimators axis
# Accuracy(GridSearchCV application)
self.score = 0
        self.scoring = 'accuracy'# f1, recall and precision require a binary target in sklearn (though sklearn kindly also provides f1_micro, f1_macro, ...)
# Normalization
self.normalization = False
    # Find the best parameters (normalization seems to make little difference for RFC; kept here for now)
def tuning_parameters(self, X, y):
        # First tuning pass (find the best n_estimators and max_features)
        # n_estimators: how many trees the forest contains (default = 10)
        # criterion: how the information used for splitting branches is measured, 'gini' or 'entropy' (default = 'gini')
        # max_features: the number of features considered when choosing the best split must not exceed this value
clf = RandomForestClassifier(n_estimators = self.n_estimators, criterion = self.criterion, max_depth = self.max_depth,
min_samples_split = self.min_samples_split, min_samples_leaf = self.min_samples_leaf,
min_weight_fraction_leaf = self.min_weight_fraction_leaf, max_features = self.max_features,
max_leaf_nodes = self.max_leaf_nodes, min_impurity_decrease = self.min_impurity_decrease,
min_impurity_split = self.min_impurity_split, bootstrap = self.bootstrap, oob_score = self.oob_score,
n_jobs = self.n_jobs, random_state = self.random_state, verbose = self.verbose,
warm_start = self.warm_start, class_weight = self.class_weight)
parameter_candidates = {# Set the parameter candidates
'n_estimators': self.n_estimators_range,
'criterion': self.criterion_range,
'max_features': self.max_features_range}
        clf_gscv = GridSearchCV(estimator = clf, param_grid = parameter_candidates, cv = self.cv, scoring = self.scoring, n_jobs = self.n_jobs)# Create a classifier with the parameter candidates
        clf_gscv.fit(X, y)# No normalization
        # A second, independent search object is needed here: re-fitting the
        # same GridSearchCV would overwrite best_score_, and the comparison
        # below would then compare the normalized fit against itself.
        normalization_clf_gscv = GridSearchCV(estimator = clf, param_grid = parameter_candidates, cv = self.cv, scoring = self.scoring, n_jobs = self.n_jobs)
        normalization_clf_gscv.fit(preprocessing.scale(X), y)# Normalization
if normalization_clf_gscv.best_score_ > clf_gscv.best_score_:
self.normalization = True
X = preprocessing.scale(X)
self.n_estimators = normalization_clf_gscv.best_estimator_.n_estimators
self.criterion = normalization_clf_gscv.best_estimator_.criterion
self.max_features = normalization_clf_gscv.best_estimator_.max_features
self.score = normalization_clf_gscv.best_score_
clf = normalization_clf_gscv
else:
self.n_estimators = clf_gscv.best_estimator_.n_estimators
self.criterion = clf_gscv.best_estimator_.criterion
self.max_features = clf_gscv.best_estimator_.max_features
self.score = clf_gscv.best_score_
clf = clf_gscv
# # Print out the results
# print('Best score for training data:', clf_gscv.best_score_)
# print('Best n_estimators:',clf_gscv.best_estimator_.n_estimators)
# print('Best max_features:',clf_gscv.best_estimator_.max_features)
# print(normalization_clf_gscv.best_score_)
# print(clf.cv_results_['params'])
criterion = [x['criterion'] for x in clf.cv_results_['params']]
# print(criterion)
max_features = [x['max_features'] for x in clf.cv_results_['params']]
# print(max_features)
plt.title("Validation Curve with RFC")
plt.xlabel("Value Of n_estimators For RFC")
plt.ylabel(self.scoring)
# 6 * 5
mean_scores = np.array(clf.cv_results_['mean_test_score']).reshape(len(self.criterion_range) * len(self.max_features_range), len(self.n_estimators_range))
std_scores = np.array(clf.cv_results_['std_test_score']).reshape(len(self.criterion_range) * len(self.max_features_range), len(self.n_estimators_range))
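# After this reshape each row is meant to hold one (criterion, max_features) pair and each
# column one n_estimators value, so every row plots as a single validation curve below.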
# print(mean_scores)
# print(std_scores)
ind = 0
for i in range(0, len(criterion), len(self.n_estimators_range)):
plt.plot(self.n_estimators_range, mean_scores[ind], "-o", label = 'criterion: ' + criterion[i] + ', max_features: ' + max_features[i])
plt.fill_between(self.n_estimators_range, mean_scores[ind] - std_scores[ind],
mean_scores[ind] + std_scores[ind], alpha = 0.2)
ind += 1
plt.legend(loc = "best") # best location
plt.savefig(self.image_name + '.png')# save image
plt.close()
print("RFC Save Image Finished")
print("RFC Tuning Parameters Finished")
# Produce Model
def train(self, X, y):
# Train
clf = RandomForestClassifier(n_estimators = self.n_estimators, criterion = self.criterion, max_depth = self.max_depth,
min_samples_split = self.min_samples_split, min_samples_leaf = self.min_samples_leaf,
min_weight_fraction_leaf = self.min_weight_fraction_leaf, max_features = self.max_features,
max_leaf_nodes = self.max_leaf_nodes, min_impurity_decrease = self.min_impurity_decrease,
min_impurity_split = self.min_impurity_split, bootstrap = self.bootstrap, oob_score = self.oob_score,
n_jobs = self.n_jobs, random_state = self.random_state, verbose = self.verbose,
warm_start = self.warm_start, class_weight = self.class_weight)
if self.normalization == True:
X = preprocessing.scale(X)
clf.fit(X, y)
# Persist the model with joblib
joblib.dump(clf, self.model_name + '.pkl')
print("RFC Save Model Finished")
# Save the parameters and the accuracy
parameters = {}
parameters['parameters'] = []
parameters['parameters'].append({
'n_estimators': self.n_estimators,
'criterion': self.criterion,
'max_features': self.max_features,
})
parameters['scoring'] = []
parameters['scoring'].append({
'valid_score': self.score
})
parameters['preprocessing'] = []
parameters['preprocessing'].append({
'normalization': self.normalization
})
with open(self.model_name + '_parameters', 'w', encoding = "utf-8") as rfcf:
json.dump(parameters, rfcf)
print("RFC Save Parameters Finished")
if __name__ == '__main__':
X, y = load_wine().data, load_wine().target
name = 'wine'
rfc = RFC(name)
rfc.tuning_parameters(X, y)
rfc.train(X, y)
# Load the saved parameters and print them
with open(rfc.model_name + '_parameters') as json_file:
data = json.load(json_file)
for p in data['parameters']:
print('n_estimators: ' + str(p['n_estimators']))
print('criterion: ' + p['criterion'])
print('max_features: ' + p['max_features'])
# Change this key when a different scoring metric is used
for s in data['scoring']:
print('valid_score: ' + str(s['valid_score']))
for p in data['preprocessing']:
print('normalization: ' + str(p['normalization']))
normalization = p['normalization']
# Load the model and run prediction
if normalization == True:
X = preprocessing.scale(X)
rfc = joblib.load(rfc.model_name + '.pkl')
print(rfc.score(X, y))
|
Alex-CHUN-YU/Recommender-System
|
scenario_algorithm_analysis/rfc.py
|
rfc.py
|
py
| 9,077 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.GridSearchCV",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.scale",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "sklearn.preprocessing.scale",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.fill_between",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.scale",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "sklearn.externals.joblib.dump",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "sklearn.externals.joblib",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.load_wine",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.scale",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "sklearn.externals.joblib.load",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "sklearn.externals.joblib",
"line_number": 171,
"usage_type": "name"
}
] |
38353405555
|
import requests
from bs4 import BeautifulSoup #screen-scraping library
#request = requests.get("http://www.google.com")
request = requests.get("https://www.johnlewis.com/house-by-john-lewis-curve-dining-chair-white/p231441579")
content = request.content #getting content of the page
soup = BeautifulSoup(content, "html.parser")
element = soup.find("span",{"itemprop":"price","class":"now-price"}) #dictionary
#print(element.text.strip())
string_price = element.text.strip() # e.g. "£19.00"
price_without_symbol = string_price[1:]
price = (float(price_without_symbol))
if price < 50:
print("You should buy the chair!")
print("The current price is {}.".format(string_price))
else:
print("Don't buy the chair!!")
# <span itemprop="price" class="now-price"> £19.00 </span>
#print(request.content)
|
BrayoKane/python-mongo
|
price-of-a-chair/src/app.py
|
app.py
|
py
| 811 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 8,
"usage_type": "call"
}
] |
712141287
|
#! /usr/bin/env python3
# coding: utf-8
import os
import logging as lg
import pandas as pd
import numpy as np
lg.basicConfig(level=lg.DEBUG)
class SetOfParliamentMembers:
def __init__(self, name):
self.name = name
def __repr__(self):
return "setOfParliamentMember: {} members".format(len(self.dataframe))
def data_from_csv(self, csv_file):
self.dataframe = pd.read_csv(csv_file, sep=";", engine = 'python')
def data_from_dataframe(self, dataframe):
self.dataframe = dataframe
def display_chart(self):
# coming soon, be patient!
pass
def split_by_political_party(self):
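# Partition the full dataframe into one SetOfParliamentMembers per declared party.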
result = {}
data = self.dataframe
all_parties = data["parti_ratt_financier"].dropna().unique()
for party in all_parties:
data_subset = data[data.parti_ratt_financier == party]
subset = SetOfParliamentMembers('MPs from party "{}"'.format(party))
subset.data_from_dataframe(data_subset)
result[party] = subset
return result
def launch_analysis(data_file, by_party=False, info=False):
sopm = SetOfParliamentMembers("All MPs")
sopm.data_from_csv(os.path.join("data", data_file))
sopm.display_chart()
if by_party:
for party, s in sopm.split_by_political_party().items():
s.display_chart()
if info:
print(sopm)
if __name__ == "__main__":
launch_analysis("current_mps.csv")
|
honorezemagho/python-oc
|
analysis/csv.py
|
csv.py
|
py
| 1,496 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 52,
"usage_type": "attribute"
}
] |
7276876468
|
from django.db import models
from django.contrib.auth.models import User
class Animal(models.Model):
"""Класс описывает объект Животное"""
owner = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="Владелец")
species = models.CharField(max_length=30, verbose_name="Вид животного")
name = models.CharField(max_length=30, verbose_name="Кличка")
birth = models.DateField(verbose_name="Дата рождения")
breed = models.CharField(max_length=30, verbose_name="Порода")
gender = models.CharField(
max_length=10, choices=[("Ж", "Женский"), ("М", "Мужской")], verbose_name="Пол"
)
class Meta:
verbose_name = "Животное"
verbose_name_plural = "Животные"
def __str__(self):
return self.name
class Vaccination(models.Model):
"""Класс описывающий объект Вакцинация"""
animal = models.ForeignKey(
Animal, on_delete=models.CASCADE, verbose_name="Животное"
)
date = models.DateField(verbose_name="Дата прививки")
vaccine = models.CharField(max_length=50, verbose_name="Вакцина")
class Meta:
verbose_name = "Вакцинация"
verbose_name_plural = "Вакцинация"
def __str__(self):
return f"{self.date}"
class Treatment(models.Model):
"""Класс описывающий объект Обратока от паразитов"""
animal = models.ForeignKey(
Animal, on_delete=models.CASCADE, verbose_name="Животное"
)
parasite_type = models.CharField(
max_length=10,
choices=[("Гельминты", "Гельминты"), ("Клещи", "Клещи")],
verbose_name="Вид паразитов",
)
date = models.DateField(verbose_name="Дата обработки")
medication = models.CharField(max_length=50, verbose_name="Препарат")
dosage = models.CharField(max_length=10, verbose_name="Дозировка")
class Meta:
verbose_name = "Обработка от паразитов"
verbose_name_plural = "Обработка от паразитов"
def __str__(self):
return f"{self.date}"
|
Gamilkar/animal_medical_record
|
main/models.py
|
models.py
|
py
| 2,320 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.CharField",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateField",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateField",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 55,
"usage_type": "name"
}
] |
31148205537
|
import argparse
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import pandas as pd
import numpy as np
import json
import os
def parse_args():
parser = argparse.ArgumentParser(prog='')
parser.add_argument('json', type=str, help='Figure1 JSON.')
parser.add_argument('-o', '--output_dir', default='.', help='')
args = parser.parse_args()
return(args)
def b(paths, outfile):
dar_enrich = pd.read_csv(paths['figure6']['b']['dar_enrichment'], sep='\t')
fp_enrich = pd.read_csv(paths['figure6']['b']['footprint_enrichment'], sep='\t')
f, axes = plt.subplots(1,2, num='b', figsize=(12, 6))
fp_logp = fp_enrich['pval_enrichment'].map(lambda x: -1*np.log10(x))
fp_logp = fp_logp.rename('footprint enrichments')
dar_logp = dar_enrich['pval_enrichment'].map(lambda x: -1*np.log10(x))
dar_logp.sort_values(ascending=False, inplace=True)
dar_logp = dar_logp.rename('top DAR enrichments')
dar_logp = dar_logp[:10]
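# Keep only the ten most significant DAR enrichments (sorted descending above).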
sns.set_style("whitegrid")
sns.kdeplot(dar_logp, shade=True, color="#E74C3C", ax=axes[0])
sns.kdeplot(fp_logp, shade=True, color="#3498DB", ax=axes[0])
axes[0].set_xlabel('-log10 pval', fontsize=15)
def label_point(x, y, val, ax):
a = pd.concat({'x': x, 'y': y, 'val': val}, axis=1)
for i, point in a.iterrows():
ax.text(point['x']+.02, point['y'], str(point['val']), fontsize=10)
def rand_jitter(arr, c):
stdev = c*(max(arr)-min(arr))
return arr + stdev
fp_enrich['pval_enrichment'] = -1*np.log10(fp_enrich['pval_enrichment'])
fp_enrich.sort_values('pval_enrichment', ascending=False, inplace=True)
fp_enrich.reset_index(drop=True, inplace=True)
sns.scatterplot(x=fp_enrich.index.tolist(), y='pval_enrichment', data=fp_enrich, ax=axes[1])
# label_point(pd.Series(fp_enrich.index.tolist()[:10]), fp_enrich['pval_enrichment'][:10], fp_enrich['name'][:10], axes[1])
axes[1].set_xticks([]) # call the method; assigning '' silently overwrote it
f.savefig(outfile, dpi=300)
def c(paths, outfile):
fp_enrich = pd.read_csv(paths['figure6']['c'], sep='\t')
hic_hit = fp_enrich[fp_enrich['name']=='ZNF416-Zf']
hic_df = pd.melt(hic_hit, id_vars=None, value_vars=['target_freq', 'bg_freq'], var_name='enrichment group', value_name='% total footprints')
hic_df.sort_values('enrichment group', inplace=True)
sns.set_style("whitegrid")
f, axes = plt.subplots(1,1, num='c', figsize=(12, 12))
palette = ['#ABB2B9','#A569BD']
sns.barplot(x='enrichment group', y='% total footprints', data=hic_df, palette=palette, ax=axes)
axes.set_xlabel('', fontsize=15)
axes.set_xticks([]) # call the method; assigning '' silently overwrote it
axes.set_xticklabels([])
axes.set_ylabel('')
f.savefig(outfile, dpi=300)
def main():
args = parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
with open(args.json) as fp:
paths = json.load(fp)
bof = os.path.join(args.output_dir, 'Figure6b.png')
cof = os.path.join(args.output_dir, 'Figure6c.png')
b(paths, bof)
c(paths, cof)
if __name__ == '__main__':
main()
|
perezja/Leukos
|
presentation/figure6/figure6.py
|
figure6.py
|
py
| 3,116 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "numpy.log10",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "seaborn.set_style",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "seaborn.kdeplot",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "seaborn.kdeplot",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "seaborn.scatterplot",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pandas.melt",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "seaborn.set_style",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "seaborn.barplot",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 95,
"usage_type": "attribute"
}
] |
74022415547
|
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from apps.celery_task.models import PeriodicTask
from apps.celery_task.serializers.periodic_task_serializer import PeriodicTaskSerializer, CreatePeriodicTaskSerializer
from packages.drf.pagination import CustomPageNumberPagination
from packages.drf.renderers import CustomRenderer
from packages.drf.viewsets import ModelViewSet
from django_filters import FilterSet
class PeriodicTaskFilter(FilterSet):
class Meta:
model = PeriodicTask
fields = {"name": ["exact"], "creator": ["contains"]}
class PeriodicTaskViewSet(ModelViewSet):
permission_classes = [AllowAny]
queryset = PeriodicTask.objects.all()
serializer_class = PeriodicTaskSerializer
pagination_class = CustomPageNumberPagination
renderer_classes = (CustomRenderer,)
filter_class = PeriodicTaskFilter
ordering_fields = ["id"]
ordering = ["-id"]
def create(self, request, *args, **kwargs):
serializer = CreatePeriodicTaskSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
name = serializer.validated_data["name"]
creator = "test"
serializer.validated_data["name"] = name
serializer.validated_data["creator"] = creator
instance = serializer.save()
instance.set_enabled(True)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@action(methods=["post"], detail=False)
def create_task(self, request, *args, **kwargs):
"""创建任务
{
"name": "test",
"cron": {"minute":"*/5","hour":"*","day_of_week":"*","day_of_month":"*","month_of_year":"*"},
}
"""
params = request.data
cron_data = params.get("cron")
name = params.get("name")
creator = params.get("creator", "test")
periodic_task = PeriodicTask.objects.create_task(name, cron_data, creator)
periodic_task.set_enabled(True)
return Response({"result": "创建成功"})
|
yaowuya/django-major-core
|
apps/celery_task/views/periodic_task_view.py
|
periodic_task_view.py
|
py
| 2,133 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django_filters.FilterSet",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "apps.celery_task.models.PeriodicTask",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "packages.drf.viewsets.ModelViewSet",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.AllowAny",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "apps.celery_task.models.PeriodicTask.objects.all",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "apps.celery_task.models.PeriodicTask.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "apps.celery_task.models.PeriodicTask",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "apps.celery_task.serializers.periodic_task_serializer.PeriodicTaskSerializer",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "packages.drf.pagination.CustomPageNumberPagination",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "packages.drf.renderers.CustomRenderer",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "apps.celery_task.serializers.periodic_task_serializer.CreatePeriodicTaskSerializer",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "apps.celery_task.models.PeriodicTask.objects.create_task",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "apps.celery_task.models.PeriodicTask.objects",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "apps.celery_task.models.PeriodicTask",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.action",
"line_number": 41,
"usage_type": "call"
}
] |
5657507234
|
import os
from functools import reduce
class Photo:
id = None
layout = None # v or h
tags = []
def __init__(self, id, layout, tags):
self.id = id
self.layout = layout
# self.tagalf = "".join(sorted(tags))
self.tagalf = tuple(sorted(tags))
self.tags = tags
def __str__(self):
return str(self.id) + " - " + " ".join(self.tags)
class Slide:
# 2 vertical or 1 horizontal
photo_ids = []
tags = []
def __init__(self, photos):
self.photo_ids = [str(photo.id) for photo in photos]
self.tags = set(reduce(list.__add__, map(lambda x: list(x.tags), photos)))
self.tags_sorted = tuple(sorted(list(self.tags)))
def __str__(self):
return " ".join([str(x) for x in self.photo_ids]) + " - " + " ".join([str(x) for x in self.tags])
class SlideShow:
slides = []
def __init__(self, slides=None):
self.slides = [] if slides is None else slides
def calculate_score(self):
if len(self.slides) == 0:
return 0
score = 0
for i, slide in enumerate(self.slides):
score += self.interest_factor(i)
return score
def interest_factor(self, i):
if i + 1 >= len(self.slides):
return 0
return interest_factor(self.slides[i], self.slides[i + 1])
def interest_factor(slide_1, slide_2):
""" interest of slides
Minimum between
the number of common tags between Si and Si+1
the number of tags in Si but not in Si+1
the number of tags in Si+1 but not in Si
"""
common = set(slide_1.tags) & set(slide_2.tags)
n_common = len(common)
n_left = len(slide_1.tags) - len(set(slide_1.tags) & set(common))
n_right = len(slide_2.tags) - len(set(common) & set(slide_2.tags))
return min(n_common, n_left, n_right)
def n_common_tags(slide_1, slide_2):
# return len(set(slide_1.tags) & set(slide_2.tags))
return len(set(slide_1.tags).intersection(slide_2.tags))
def read_input(filepath):
with open(filepath, 'r') as f:
n = int(f.readline())
i = 0
result = []
while i < n:
line = f.readline()[:-1].split(" ")
result.append(Photo(i, line[0], line[2:]))
i += 1
return result
def write_output(slideshow, output_file):
with open(output_file, "w") as f:
f.write(str(len(slideshow.slides)) + "\n")
for slide in slideshow.slides:
f.write(' '.join(slide.photo_ids) + "\n")
with open(output_file, 'rb+') as f:
f.seek(-2, os.SEEK_END)
f.truncate()
def get_slideshow(photos):
slideshow = SlideShow()
vert = None
slides = []
for photo in sorted(photos, key=lambda x: x.tagalf):
if photo.layout == "H":
slides.append(Slide([photo]))
elif photo.layout == "V" and vert is None:
vert = photo
elif photo.layout == "V" and vert is not None:
slides.append(Slide([photo, vert]))
vert = None
slides.sort(key=lambda x: x.tags_sorted)
return SlideShow(slides)
def main():
files = ['a_example.txt', 'b_lovely_landscapes.txt', 'c_memorable_moments.txt', 'd_pet_pictures.txt',
'e_shiny_selfies.txt']
sum_score = 0
for file in files:
print(file)
photos = read_input(file)
slideshow = get_slideshow(photos)
score = slideshow.calculate_score()
sum_score += score
print("SCORE: {}\n".format(score))
write_output(slideshow, "output/" + file)
print("END, {}".format(sum_score))
return None
if __name__ == "__main__":
main()
|
phyx4/hashcode_2019
|
main.py
|
main.py
|
py
| 3,664 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "functools.reduce",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.SEEK_END",
"line_number": 93,
"usage_type": "attribute"
}
] |
5792709387
|
from collections import OrderedDict
from rest_framework import serializers, relations
class RelatedField(serializers.PrimaryKeyRelatedField):
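# A PrimaryKeyRelatedField variant that also accepts a nested dict: the related
# instance is looked up by `lookup` (default "id") and, when allow_insert=True and a
# serializer is provided, created or updated from that dict (see to_internal_value).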
def __init__(self, **kwargs):
self.serializer = kwargs.pop("serializer", None)
self.lookup = kwargs.pop("lookup", "id")
if self.serializer is not None:
assert issubclass(
self.serializer, serializers.Serializer
), '"serializer" is not a valid serializer class'
assert hasattr(
self.serializer.Meta, "model"
), 'Class {serializer_class} missing "Meta.model" attribute'.format(
serializer_class=self.serializer.__class__.__name__
)
if not kwargs.get("read_only", False):
kwargs["queryset"] = kwargs.get(
"queryset", self.serializer.Meta.model.objects.all()
)
self.allow_insert = kwargs.pop("allow_insert", False)
# kwargs["allow_null"] = kwargs.get("allow_null", self.serializer.Meta.model._meta.get_field(self.source).null)
super().__init__(**kwargs)
def use_pk_only_optimization(self):
return False if self.serializer else True
def to_representation(self, instance):
if self.serializer:
return self.serializer(instance, context=self.context).data
return super().to_representation(instance)
def to_internal_value(self, data):
if isinstance(data, dict):
instance = (
self.get_queryset()
.filter(**{self.lookup: data.get(self.lookup, None)})
.first()
)
if self.allow_insert is True and len(data.keys()) and self.serializer:
serialized_data = self.serializer(
instance=instance, data=data, context=self.context
)
if serialized_data.is_valid(raise_exception=ValueError):
instance = serialized_data.save()
return instance
return super().to_internal_value(data)
def get_choices(self, cutoff=None):
queryset = self.get_queryset()
if queryset is None:
return {}
if cutoff is not None:
queryset = queryset[:cutoff]
return OrderedDict(
[
(
super(RelatedField, self).to_representation(item),
self.display_value(item),
)
for item in queryset
]
)
@classmethod
def many_init(cls, *args, **kwargs):
class ManyRelatedField(serializers.ManyRelatedField):
def to_internal_value(self, data):
if isinstance(data, str) or not hasattr(data, "__iter__"):
self.fail("not_a_list", input_type=type(data).__name__)
if not self.allow_empty and len(data) == 0:
self.fail("empty")
child = self.child_relation
instances = {
getattr(item, child.lookup): item
for item in child.get_queryset().filter(
**{
f"{child.lookup}__in": [
item.get(child.lookup, None)
if isinstance(item, dict)
else item
for item in data
]
}
)
}
if child.allow_insert is True and len(data) and child.serializer: # data is a list here, so len(data), not data.keys()
for item in data:
serialized_data = child.serializer(
instance=instances.get(item.get(child.lookup)),
data=item,
context=child.context,
)
if serialized_data.is_valid(raise_exception=ValueError):
instance = serialized_data.save()
instances[instance.pk] = instance
return instances.values()
list_kwargs = {"child_relation": cls(*args, **kwargs)}
for key in kwargs:
if key in relations.MANY_RELATION_KWARGS:
list_kwargs[key] = kwargs[key]
return ManyRelatedField(**list_kwargs)
|
lotrekagency/camomilla
|
camomilla/serializers/fields/related.py
|
related.py
|
py
| 4,402 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "rest_framework.serializers.PrimaryKeyRelatedField",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.Serializer",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers.ManyRelatedField",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "rest_framework.relations.MANY_RELATION_KWARGS",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.relations",
"line_number": 104,
"usage_type": "name"
}
] |
18959826347
|
from rest_framework.decorators import api_view, permission_classes
import random
import string
from pprint import pprint as pp
import requests
from allauth.account.models import EmailAddress
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from points.views import new_user_point
from .serializers import *
User = get_user_model()
@api_view(['POST'])
@permission_classes([AllowAny])
def kakao_login_and_get_userinfo(request):
code = request.data.get('code')
headers = {
'Content-type': 'application/x-www-form-urlencoded',
}
body = {
'grant_type': 'authorization_code',
'client_id': 'dcf8cc38ec4e7ec39baf6207a53ed140',
'redirect_uri': 'https://kickin.kr/loading/',
'code': code,
}
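# Exchange the authorization code for an access token (OAuth2 authorization-code grant).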
response = requests.post(headers=headers, url='https://kauth.kakao.com/oauth/token', data=body)
access_token = response.json().get('access_token')
headers = {
'Authorization': f'Bearer {access_token}',
'Content-type': 'application/x-www-form-urlencoded;charset=utf-8',
}
info_request = requests.get(url='https://kapi.kakao.com/v2/user/me', headers=headers)
info_res = info_request.json()
nickname = info_res.get('properties').get('nickname')
email = info_res.get('kakao_account').get('email')
# Check whether an account was already registered with this email.
# If there is no account for this email, create a new user.
user = User.objects.filter(email=email).first() # .first() yields None when absent, and a User (not a QuerySet) otherwise
if not user:
user = User.objects.create_user(email=email, password='Kakao_' + nickname + '977')
user.login_type = 1
user.save()
# Kakao logins skip the separate email verification step
EmailAddress.objects.create(user=user, email=email, verified=True, primary=True)
# Update this user's info: login_type = 1 (Kakao login)
# Create the user's UserInfo record
user_info, user_created = UserInfo.objects.get_or_create(user=user)
new_user_point(user.id) # Initialize this user's points
# Social-profile data can change at any time, so it is deliberately not stored.
kakao_profile = info_res.get('kakao_account').get('profile').get('profile_image_url')
kakao_nickname = info_res.get('properties').get('nickname')
# Build the login response payload
response_data = {
'kakao_profile': kakao_profile,
'kakao_nickname': kakao_nickname,
'kakao_email': email, # Include the email so the client can complete login; the password is patterned (Kakao_ + nickname + 977)
}
return Response(data=response_data, status=status.HTTP_200_OK)
@api_view(['POST'])
@permission_classes([AllowAny])
def kakao_test(request):
code = request.data.get('code')
headers = {
'Content-type': 'application/x-www-form-urlencoded',
}
body = {
'grant_type': 'authorization_code',
'client_id': 'dcf8cc38ec4e7ec39baf6207a53ed140',
'redirect_uri': 'http://localhost:8080/loading/',
'code': code,
}
response = requests.post(headers=headers, url='https://kauth.kakao.com/oauth/token', data=body)
pp(response.json())
access_token = response.json().get('access_token')
headers = {
'Authorization': f'Bearer {access_token}',
'Content-type': 'application/x-www-form-urlencoded;charset=utf-8',
}
info_request = requests.get(url='https://kapi.kakao.com/v2/user/me', headers=headers)
info_res = info_request.json()
pp(info_res)
return Response(data=info_res, status=status.HTTP_200_OK)
|
isaacShin-dev/kickin
|
accounts/social_views.py
|
social_views.py
|
py
| 3,846 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.post",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "allauth.account.models.EmailAddress.objects.create",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "allauth.account.models.EmailAddress.objects",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "allauth.account.models.EmailAddress",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "points.views.new_user_point",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.permission_classes",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "rest_framework.permissions.AllowAny",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.permission_classes",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "rest_framework.permissions.AllowAny",
"line_number": 79,
"usage_type": "name"
}
] |
21275819456
|
"""Defines all necessary networks for training / evaluation
"""
from typing import Optional, Tuple
import mindspore.nn as nn
from mindspore import Tensor
from .backbones import Backbone
from .decoders import Decoder
from .heads import Head
from .loss import Loss
from .necks import Neck
class Net(nn.Cell):
"""Create network for foward and backward propagate.
Args:
backbone: Model backbone
head: Model head
neck: Model neck. Default: None
Inputs:
| x: Tensor
Outputs:
| result: Tensor
"""
def __init__(
self, backbone: Backbone, head: Head, neck: Optional[Neck] = None
) -> None:
super().__init__()
self.backbone = backbone
self.head = head
self.neck = neck
self.has_neck = self.neck is not None
def construct(self, x: Tensor) -> Tensor:
x = self.backbone(x)
if self.has_neck:
x = self.neck(x)
x = self.head(x)
return x
class EvalNet(nn.Cell):
"""Create network for forward propagate and decoding only.
Args:
net: Network used for forward and backward propagation
decoder: Decoder
output_raw: Also return the net's raw output. Default: True
Inputs:
| inputs: List of tensors
Outputs:
| result: Decoded result
| raw_result (optional): Raw result if output_raw is true
"""
def __init__(self, net: Net, decoder: Decoder, output_raw: bool = True) -> None:
super().__init__()
self.net = net
self.decoder = decoder
self.output_raw = output_raw
self.net.set_train(False)
self.decoder.set_train(False)
def construct(self, *inputs: Tensor) -> Tuple[Tensor, ...]:
x = self.net(inputs[0])
result = self.decoder(x, *inputs[1:])
if self.output_raw:
return result, x
return result
class NetWithLoss(nn.Cell):
"""Create network with loss.
Args:
net: Network used for forward and backward propagation
loss: Loss cell
has_extra_inputs: Has Extra inputs in the loss calculation. Default: False
Inputs:
| data: Tensor feed into network
| label: Tensor of label
| extra_inputs: List of extra tensors used in loss calculation
Outputs:
| loss: Loss value
"""
def __init__(self, net: Net, loss: Loss, has_extra_inputs: bool = False) -> None:
super().__init__()
self.net = net
self.loss = loss
self.has_extra_inputs = has_extra_inputs
def construct(self, data: Tensor, label: Tensor, *extra_inputs: Tensor) -> Tensor:
out = self.net(data)
if self.has_extra_inputs:
return self.loss(out, label, *extra_inputs)
return self.loss(out, label)
|
mindspore-lab/mindpose
|
mindpose/models/networks.py
|
networks.py
|
py
| 2,807 |
python
|
en
|
code
| 15 |
github-code
|
6
|
[
{
"api_name": "mindspore.nn.Cell",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "backbones.Backbone",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "heads.Head",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "necks.Neck",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "mindspore.Tensor",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Cell",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "decoders.Decoder",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "mindspore.Tensor",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Cell",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "loss.Loss",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "mindspore.Tensor",
"line_number": 102,
"usage_type": "name"
}
] |
28031461245
|
#!/usr/bin/python3
from time import sleep
from datetime import date, datetime
from pynput.keyboard import Key, Controller
from logging.handlers import RotatingFileHandler
import sys, signal, argparse, logging, platform, subprocess
# ----------------------------------Configuration--------------------------------
VOLUME = "0.3"
BREAK_NUM = 1
WORK_DURATION = 900
BREAK_DURATION = 120
MAC = False
LINUX = False
WINDOWS = False
LINUX_PATH = ""
MAC_PATH = "/Users/mutnawaz/Desktop/Muteeb/Code/timer/"
WINDOWS_PATH = "C:\\Users\\Muteeb\\Desktop\\RV Major Project\\Personal\\timer\\"
# ---------------------------------end of Configuration---------------------------
log = None
def __init_logger():
global log
if log is not None:
log.debug("logger already initialized.")
return None
try:
"log format <data/time:level:filename:line:function:message>"
log_formatter = logging.Formatter("%(levelname)5.5s %(filename)5s#%(lineno)3s %(message)s")
"Refer the log file path"
PATH = get_path()
log_file = PATH + "timer.log"
"Max size of the log file is 2MB, it rotate if size exceeds"
handler = RotatingFileHandler(
log_file,
mode="a",
maxBytes=(2 * 1024 * 1024),
backupCount=4,
encoding=None,
delay=0,
)
"appy the log format and level"
handler.setFormatter(log_formatter)
handler.setLevel(logging.DEBUG)
log = logging.getLogger("timer.log")
log.setLevel(logging.DEBUG)
"apply the settings to the log"
log.addHandler(handler)
log.debug("Start logging the times")
return handler
except Exception as e:
log.error("Failed to create logger: %s", str(e))
def exit_handler(sig, frame):
print("\nGood bye. Have a nice day!\n")
greet()
sys.exit(0)
def greet():
try:
print(subprocess.check_output("motivate", shell=True, stderr=subprocess.DEVNULL).decode())
except:
print("\n******************************************************")
print("* *")
print("* *")
print("* You can do it! Sending lots of energy to you :) *")
print("* *")
print("* *")
print("******************************************************")
def get_time():
now = datetime.now()
time = now.strftime("%H:%M:%S")
return time
def play_sound(sound_file):
if MAC:
subprocess.check_output("afplay --volume " + VOLUME + " {}".format(sound_file), shell=True)
elif LINUX:
subprocess.check_output("aplay -q {}&".format(sound_file), shell=True)
else:
winsound.PlaySound(sound_file, winsound.SND_ASYNC)
def get_path():
if MAC:
return MAC_PATH
elif LINUX:
return LINUX_PATH
else:
return WINDOWS_PATH
def display_sleep():
if MAC:
# subprocess.check_output("pmset displaysleepnow", shell=True) # Put system to sleep.
subprocess.check_output("open -a ScreenSaverEngine", shell=True)
def wakeup():
if MAC:
# subprocess.check_output("pmset relative wake 1", shell=True) # Wakeup the system.
# log.debug("Waking up.")
keyboard = Controller()
key = Key.esc
keyboard.press(key)
keyboard.release(key)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--slient", action="store_true", help="Run in silent mode.")
args = vars(parser.parse_args())
if platform.system() == "linux" or platform.system() == "linux2":
LINUX = True
elif platform.system() == "darwin" or platform.system() == "Darwin":
MAC = True
elif platform.system() == "win32" or platform.system() == "Windows":
WINDOWS = True
if not args["slient"]:
try:
import winsound
except Exception as e:
print("Sound is not supported in windows. Reason: {0}".format(e))
args["slient"] = True
__init_logger()
PATH = get_path()
signal.signal(signal.SIGINT, exit_handler)
greet()
if args["slient"]:
print("Running in slient mode...")
log.info("Today's date: {0}".format(date.today()))
if not args["slient"]:
play_sound(PATH + "start_timer.wav")
while True:
log.info("Work number {0}, start time {1}".format(BREAK_NUM, get_time()))
sleep(WORK_DURATION)
log.info("Work number {0}, end time {1}".format(BREAK_NUM, get_time()))
if not args["slient"]:
play_sound(PATH + "take_break.wav")
display_sleep()
log.info("Break number {0}, start time {1}".format(BREAK_NUM, get_time()))
sleep(BREAK_DURATION)
log.info("Break number {0}, end time {1}".format(BREAK_NUM, get_time()))
if not args["slient"]:
play_sound(PATH + "two_mins_up.wav")
wakeup()
BREAK_NUM += 1
|
muteebakram/Timer
|
main.py
|
main.py
|
py
| 5,198 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.Formatter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "logging.handlers.RotatingFileHandler",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "subprocess.DEVNULL",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "subprocess.check_output",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "pynput.keyboard.Controller",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "pynput.keyboard.Key.esc",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "pynput.keyboard.Key",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "signal.signal",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "signal.SIGINT",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 168,
"usage_type": "call"
}
] |
12510085973
|
from tqdm import tqdm
import math
import time
import numpy as np
def bingliu_mpqa(utterance_tokenized, file):
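# Per utterance this builds a 4-dim lexicon feature (reading off the logic below):
# [positive-token count, positive-minus-negative count, contains-a-positive flag, last-token polarity (+1/-1)]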
feat_ = []
dict1_bing = {}
for line in file:
x = line.split("\t")
dict1_bing[x[0] + "_" + x[1][:-1]] = 1
i=0
for tokens in utterance_tokenized:
res = np.array([0,0,0,0])
for token in tokens:
pos = (token + "_positive")
neg = (token + "_negative")
if (pos in dict1_bing):
res[0]+=1
res[1]+=1
elif (neg in dict1_bing):
res[1]-=1
if res[0]>0:
res[2]=1
if tokens!=[]:
pos = tokens[-1] + "_positive"
neg = tokens[-1] + "_negative"
if pos in dict1_bing:
res[3]=1
elif neg in dict1_bing:
res[3]=-1
feat_.append(res)
return np.array(feat_)
def SENT140(X):
#sentiment140
dict1_S140 = {}
with open("lexicons/3. Sentiment140-Lexicon-v0.1/unigrams-pmilexicon.txt", 'r') as fd:
for line in fd:
x = line.split(" ")
dict1_S140[x[0]] = float(x[1])
feat_ = []
for tokens in X:
sent140 = [0,0,0,0]
cnt = 0
for token in tokens:
if("#" not in token):
cnt += 1
if(token in dict1_S140):
sent140[0] += (dict1_S140[token] > 0)
sent140[1] += dict1_S140[token]
sent140[2] = max(sent140[2],dict1_S140[token])
if(len(tokens) >= 1 and tokens[-1] in dict1_S140):
sent140[3] = (dict1_S140[tokens[-1]] > 0)
feat_.append(sent140)
return np.array(feat_)
# print()
def NRC_EMOTION(X):
#NRC emotion
dict1_NRC = {}
cnt_r = 0
len1 = 0;
with open("lexicons/6. NRC-10-expanded.csv", 'r') as fd:
for line in fd:
if(cnt_r == 0):
cnt_r += 1
continue;
x = line.split(" ")
dict1_NRC[x[0]] = [float(i) for i in x[1:]]
len1 = len(x[1:])
feat_ = []
for e,tokens in tqdm(enumerate(X)):
emo_score = [[0,0,0,0] for i in range(len1)]
cnt = 0
for token in tokens:
if("#" in token):
continue
cnt += 1
if(token in dict1_NRC):
for i,val in enumerate(dict1_NRC[token]):
emo_score[i][0] += (val > 0)
emo_score[i][1] += val
emo_score[i][2] = max(emo_score[i][2],val)
if(len(tokens) >= 1 and tokens[-1] in dict1_NRC):
for i,val in enumerate(dict1_NRC[tokens[-1]]): # score the last token, not the stale loop variable
emo_score[i][3] = (val > 0)
res = []
for i in emo_score:
res.extend(i)
feat_.append(res)
return np.array(feat_)
# print()
def NRC_HASHTAG_SENT(X):
#NRC hashtag
dict1_NRC = {}
with open("lexicons/7. NRC-Hashtag-Sentiment-Lexicon-v0.1/unigrams-pmilexicon.txt", 'r') as fd:
for line in fd:
x = line.split(" ")
dict1_NRC[x[0]] = float(x[1])
feat_ = []
for tokens in X:
cnt = 0
f = [0,0,0,0]
for token in tokens:
if("#" not in token):
continue
cnt += 1
if(token in dict1_NRC):
f[0] += (dict1_NRC[token] > 0)
f[1] += dict1_NRC[token]
f[2] = max(f[2],dict1_NRC[token])
if(len(tokens) >= 1 and tokens[-1] in dict1_NRC):
f[3] = (dict1_NRC[tokens[-1]] > 0)
feat_.append(f)
return np.array(feat_)
def lexicons(utterance_tokenized):
filebingliu = open("lexicons/1. BingLiu.csv", "r")
filempqa = open("lexicons/2. mpqa.txt", "r")
start = time.time()
bingliu = bingliu_mpqa(utterance_tokenized, filebingliu)
mpqa = bingliu_mpqa(utterance_tokenized, filempqa)
sent140 = SENT140(utterance_tokenized)
nrcemotion = NRC_EMOTION(utterance_tokenized)
nrchashtag = NRC_HASHTAG_SENT(utterance_tokenized)
end = time.time()
print("time to calculate lexicons: ", end-start)
# y = len(bingliu[0]) + len([mpqa[0]]) + len(sent140[0]) + len(nrcemotion[0]) + len(nrchashtag[0])
feature = np.zeros([len(utterance_tokenized), 56])
for i in range(len(utterance_tokenized)):
feature[i] = np.concatenate((bingliu[i], mpqa[i], sent140[i], nrcemotion[i], nrchashtag[i]))
return feature
if __name__=='__main__':
lexicons([["good", "morning"], ["not", "happy"]]) # minimal smoke test; the original referenced an undefined variable
|
hamzah70/Multi_Modal_Emotion_Analysis
|
lexiconFeatureVector.py
|
lexiconFeatureVector.py
|
py
| 4,491 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 133,
"usage_type": "call"
}
] |
19411076487
|
def create_offering(newOffering):
classTimesArray = []
if newOffering.classTimes:
for classTime in newOffering.classTimes:
classTime = {
u'location': classTime.location,
u'startTime': classTime.startTime,
u'endTime': classTime.endTime,
u'sunday': classTime.sunday,
u'monday': classTime.monday,
u'tuesday': classTime.tuesday,
u'wednesday': classTime.wednesday,
u'thursday': classTime.thursday,
u'friday': classTime.friday,
u'saturday': classTime.saturday
}
classTimesArray.append(classTime)
extrasDict = {
u'Attributes': newOffering.attributes,
u'Levels':newOffering.levels,
u'Total Seats': newOffering.totalSeats,
u'Taken Seats': newOffering.takenSeats,
u'Total Waitlist Seats': newOffering.totalWaitlistSeats,
u'Taken Waitlist Seats': newOffering.takenWaitlistSeats
}
return {
u'sectionNumber': newOffering.sectionNumber,
u'status': newOffering.status,
u'id': newOffering.id,
u'instructors': newOffering.instructors,
u'classTimes': classTimesArray,
u'extras': extrasDict
}
class Offering:
status = None
levels = None
id = None
departmentName = None
departmentAcronym = None
departmentNumberString = None
departmentNumber = None
sectionNumber = None
name = None
credit = None
classTimes = None
startDate = None
endDate = None
comment = None
attributes = None
booksLink = None
bulletinLink = None
description = None
instructors = None
totalSeats = None
takenSeats = None
totalWaitlistSeats = None
takenWaitlistSeats = None
class ClassTime:
location = None
startTime = None
endTime = None
sunday = False
monday = False
tuesday = False
wednesday = False
thursday = False
friday = False
saturday = False
import requests
from datetime import datetime
import pytz
from pytz import timezone
eastern = timezone('EST')
import re
from bs4 import BeautifulSoup
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import google.cloud.exceptions
import urllib
print ("-------- EMERSON COURSE SCRAPE ----------")
cred = credentials.Certificate('./credentials.json')
firebase_admin.initialize_app(cred)
#
db = firestore.client()
# Make request and load offerings
data = {'begin_ap':'a','begin_hh':'0','begin_mi':'0','end_ap':'a','end_hh':'0','end_mi':'0',
'sel_attr':['dummy','%'],'sel_camp':['dummy','%'],'sel_crse':'','sel_day':'dummy','sel_from_cred':'',
'sel_insm':'dummy','sel_instr':['dummy','%'],'sel_levl':['dummy','%'],'sel_ptrm':['dummy','%'],
'sel_schd':['dummy','%'],'sel_sess':'dummy','sel_subj':['dummy','BC','MB','CM','CD','CC','DA','DD','EC',
'EXT','FL','LF','HI','HS','IN','JR','LI','MK','MT','MU','PA','PH','PL','PF','PDE','CE','PS','PB','RL',
'SOC','SA','SC','SW','SO','LS','TH','VM','WDC','WR'],'sel_title':'','sel_to_cred':'','term_in':'201910'}
url = "https://ssb.emerson.edu/PURPLE/bwckschd.p_get_crse_unsec"
# get departments and instructors first
print("Fetching homepage...")
dataHomepage = dict(data)
dataHomepage['sel_subj'] = 'dummy'
r = requests.post(url, data=dataHomepage)
soup = BeautifulSoup(r.content, "html.parser")
unlistedDepts = {
"Bsns of Creative Enterprises": "BC",
"Civic Media": "CM",
"External Program Course": "EXT VAL",
"Prof Development Experience":"PDE",
"School of Communication":"SOC",
"Washington Program":"DC"
}
print("Page fetched. Uploading departments...")
departments = soup.find('td', class_='dedefault').find_all('option')
departmentsArray = []
for department in departments:
info = department.text.split("(")
if len(info)>1:
deptDict = {
u'departmentAcronym':re.sub('[^A-Z]','', info[1].strip()),
u'departmentName':info[0].strip()
}
else:
deptDict = {
u'departmentAcronym':unicode(unlistedDepts[info[0].strip()]),
u'departmentName':info[0].strip()
}
departmentsArray.append(deptDict)
doc_ref = db.collection(u'schools/emerson/lists').document('departments')
doc_ref.set({u'list':departmentsArray})
print("Departments uploaded. Uploading instructors...")
instructors = soup.find('select', attrs={"name": "sel_instr"}).find_all('option')
instructorsArray = []
for p in range(1,len(instructors)):
instructor = re.sub(' +', ' ',instructors[p].text.strip())
if not instructor in instructorsArray:
instructorsArray.append(instructor)
doc_ref = db.collection(u'schools/emerson/lists').document('instructors')
doc_ref.set({u'list':instructorsArray})
print("Instructors uploaded. Uploading courses. Fetching all courses on one page...")
# Long, full networking request
r = requests.post(url, data=data)
print("Page fetched. Parsing and uploading...")
soup = BeautifulSoup(r.content,"html.parser")
# Speedier file test
# file = urllib.urlopen("file:///Users/timtraversy/Google Drive//Development/Course Gnome/code/GWU-Scrape-Python/test.html")
# soup = BeautifulSoup(file,"html.parser")
offering_table = soup.find('table', class_='datadisplaytable')
offerings = offering_table.find_all('tr', recursive=False)
courseArray = []
# Loop over offerings two at a time to get both data pieces
count = 0
for i in range(0,len(offerings),2):
# Set up offering object
newOffering = Offering()
data = offerings[i].text.split(' - ')
# Hack to account for class names that have a " - "
offset = 0
if len(data) > 4:
concatName = data[0].strip()
for m in range(1, len(data)-3):
concatName += " - "
concatName += data[m].strip()
offset += 1
newOffering.name = concatName
else:
newOffering.name = data[0].strip()
if newOffering.name == 'Cancelled':
continue
newOffering.id = data[1+offset].strip()
newOffering.departmentAcronym = data[2+offset].strip().split(' ')[0]
if newOffering.departmentAcronym == "EXT":
newOffering.departmentAcronym = unicode("EXT VAL")
newOffering.departmentName = unicode("External Program Course")
else:
for dept in departmentsArray:
if dept[u'departmentAcronym'] == newOffering.departmentAcronym:
newOffering.departmentName = dept[u'departmentName']
newOffering.departmentNumber = data[2+offset].strip().split(' ')[1]
newOffering.sectionNumber = data[3+offset].strip()
# Get seat details + status
url = "https://ssb.emerson.edu" + offerings[i].find('a')['href']
r = requests.post(url)
detailSoup = BeautifulSoup(r.content,"html.parser")
seats = detailSoup.find_all('td', class_="dddefault")
# Seats
    newOffering.totalSeats = seats[1].text
    newOffering.takenSeats = seats[2].text
    newOffering.totalWaitlistSeats = seats[4].text
    # newOffering.takenWaitlistSeats = seats[5].text
    # Status: compare as integers (string comparison is lexicographic, e.g. '9' > '10');
    # totalWaitlistSeats must be assigned above since the elif below reads it
    if int(newOffering.totalSeats) > int(newOffering.takenSeats):
        newOffering.status = u'OPEN'
    elif newOffering.totalWaitlistSeats == '0':
        newOffering.status = u"CLOSED"
    else:
        newOffering.status = u"WAITLIST"
# get levels and attributes
data = offerings[i+1].find_all('span')
for span in data:
if span.text.strip() == 'Levels:':
newOffering.levels = span.next_sibling.strip()
elif span.text.strip() == 'Attributes:':
newOffering.attributes = span.next_sibling.strip()
# Credits
catalog_entry = offerings[i+1].find('a')
credits = catalog_entry.previous_sibling.previous_sibling.previous_sibling.strip()
credits = re.sub('Credits','', credits).strip()
credits = re.sub('\.0+','', credits).strip()
credits = re.sub('OR','or', credits)
credits = re.sub('TO','to', credits)
credits = re.sub(' +',' ', credits)
newOffering.credit = unicode(credits)
# Description from catalog entry
url = "https://ssb.emerson.edu" + catalog_entry['href']
r = requests.post(url)
catalogSoup = BeautifulSoup(r.content,"html.parser")
newOffering.description = catalogSoup.find('td', class_="ntdefault").text.split('\n')[1].strip()
#Class Times
instructors = []
classTimes=[]
class_time_table = offerings[i+1].find('table',class_='datadisplaytable')
if class_time_table:
class_time_table = class_time_table.find_all('tr')
for j in range(1,len(class_time_table)):
newClassTime = ClassTime()
details = class_time_table[j].find_all('td',class_='dddefault')
for k in range (1,len(details)):
text = details[k].text.strip()
valid = True
if k == 1:
if text != 'TBA':
times = text.split('-')
newClassTime.startTime = eastern.localize(datetime.strptime(times[0].strip(), '%I:%M %p'))
newClassTime.endTime = eastern.localize(datetime.strptime(times[1].strip(), '%I:%M %p'))
else:
valid = False
break
if k == 2:
if 'U' in text:
newClassTime.sunday = True
if 'M' in text:
newClassTime.monday = True
if 'T' in text:
newClassTime.tuesday = True
if 'W' in text:
newClassTime.wednesday = True
if 'R' in text:
newClassTime.thursday = True
if 'F' in text:
newClassTime.friday = True
if 'S' in text:
newClassTime.saturday = True
if k == 3:
# location
newClassTime.location = text
if k == 6:
insts = re.sub('\([A-z]\)','', text).split(',')
for inst in insts:
if inst == "TBA":
instructors = None
break
newInst = inst.strip()
if not newInst in instructors:
instructors.append(newInst)
if valid:
classTimes.append(newClassTime)
if classTimes:
newOffering.classTimes = classTimes
if instructors:
newOffering.instructors = instructors
courseArray.append(newOffering)
print('Parsed: {id}, Count:{len}'.format(id=unicode(newOffering.id), len=len(courseArray)))
count = 0
for indx, course in enumerate(courseArray):
offeringsArray = [create_offering(course)]
index = indx + 1
while index < len(courseArray):
courseTwo = courseArray[index]
if (course.name == courseTwo.name and course.departmentNumber == courseTwo.departmentNumber and course.departmentAcronym == courseTwo.departmentAcronym):
offeringsArray.append(create_offering(courseTwo))
del courseArray[index]
else:
index += 1
dictionary = {
u'departmentName': course.departmentName,
u'departmentAcronym': course.departmentAcronym,
u'departmentNumber': course.departmentNumber,
u'name': course.name,
u'credit': course.credit,
u'description': course.description,
u'offerings': offeringsArray,
}
identifier = unicode(course.departmentAcronym + str(course.departmentNumber))
db.collection(u'schools/emerson/fall2018_courses').document(identifier).set(dictionary)
count += 1
print('Uploaded ({count}/{total}): {id}'.format(count=count, total=len(courseArray), id=course.id))
# Updating version number
doc_ref = db.collection(u'schools').document(u'emerson')
try:
doc = doc_ref.get()
version = doc.to_dict()['version']
print(u'Updating from version {}'.format(doc.to_dict()['version']))
doc_ref.set({u'version':version + 1})
except google.cloud.exceptions.NotFound:
print(u'No metadata, something is wrong.')
exit(1)
print ("----- EMERSON COURSE SCRAPE COMPLETE ------")
|
timtraversy/GWU-Scrape-Python
|
emerson-scrape.py
|
emerson-scrape.py
|
py
| 12,340 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pytz.timezone",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "firebase_admin.credentials.Certificate",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "firebase_admin.credentials",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "firebase_admin.initialize_app",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "firebase_admin.firestore.client",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "firebase_admin.firestore",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "google.cloud.exceptions.cloud",
"line_number": 333,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.exceptions",
"line_number": 333,
"usage_type": "name"
}
] |
17509722663
|
'''
Problem Statement
Your company has a big conference coming up and needs to book conference rooms in a convention center. To help the company save budget, we want to book as few conference rooms as possible given a list of meeting schedules that contains only the starting and ending time of each meeting. Write a program that helps figure out the minimum number of conference rooms needed.
Examples:
[(2,7)] -> Output: 1
[(0,30),(5,10),(15,20),(21,22),(21,28)] -> Explanation: Room1: (0,30); Room2: (5,10),(15,20),(21,22); Room3: (21,28) -> Output: 3
[(0,30),(0,10),(5,15),(11,20),(17,25),(21,30)]
examples
(0,30),
(5,10), (15,20), (21,22)
(21,28)
(0, 30), (0, 10), (5,15), (11, 20), (17, 25), (21,30)
assumptions
approaches
1)
(0,30),
(5,22),
(21,28)
2)
(0, 30), (0, 10), (5,15), (11, 20), (17, 25), (21,30)
0 1 2 3 4 5
0 30
count: 1
create a res array
for any new interval, look in res for a place where int has no intersection. this space defines a room!
tradeoffs
this appears to be the only way
'''
from typing import List, Tuple
def roomcount(times: List[Tuple[int, int]]) -> int:
'''
s1------e1
s2-------e2
'''
def intersects(start1, end1, start2, end2):
return min(end1, end2) > max(start1, start2)
def no_intersects(lis):
for int_ in lis:
            if intersects(*int_, start, end):  # strict overlap; meetings that merely touch endpoints can share a room
return False
return True
rooms = []
for start, end in times:
for lis in rooms:
if no_intersects(lis):
lis.append((start, end))
break
else:
rooms.append([(start, end)])
return len(rooms)
ints = [(2,7)] # -> Output: 1
print(roomcount(ints))
ints = [(0,30),(5,10),(15,20), (21, 22), (21, 28) ] #3
print(roomcount(ints))
ints = [(0,30),(5,10),(15,20),(21, 22), (22, 28) ] #2
print(roomcount(ints))
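# --- Hedged alternative sketch (not part of the original solution) ----------
# The notes above ask about other approaches. A common O(n log n) alternative
# sorts meetings by start time and tracks occupied rooms in a min-heap keyed
# by end time; the heap's final size is the number of rooms needed.
import heapq

def roomcount_heap(times: List[Tuple[int, int]]) -> int:
    ends = []  # min-heap of end times, one entry per occupied room
    for start, end in sorted(times):
        if ends and ends[0] <= start:
            heapq.heapreplace(ends, end)  # reuse the room that frees up earliest
        else:
            heapq.heappush(ends, end)     # all rooms busy: open a new one
    return len(ends)

# Should agree with roomcount() on the cases above, e.g.
# roomcount_heap([(0,30),(5,10),(15,20),(21,22),(21,28)]) == 3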
|
soji-omiwade/cs
|
dsa/before_rubrik/minimum_rooms.py
|
minimum_rooms.py
|
py
| 1,937 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 37,
"usage_type": "name"
}
] |
21721374854
|
import os
import math
import json
import librosa
from settings import (
SAMPLE_RATE,
NUM_MFCC,
N_FTT,
HOP_LENGTH,
NUM_SEGMENTS,
DURATION,
)
DATASET_PATH = "data\\archive\\Data\\genres_original" # loaded using the GTZAN Music Genre Classification dataset at https://www.kaggle.com/datasets/andradaolteanu/gtzan-dataset-music-genre-classification
JSON_PATH = "data\\data.json"
SAMPLES_PER_TRACK = SAMPLE_RATE * DURATION
def dump_mfccs_to_json(dataset_path=None):
"""
    Processes the audio dataset into MFCC features and genre labels
"""
dataset_path = dataset_path if dataset_path is not None else DATASET_PATH
data = {
"mapping": [],
"mfcc": [],
"labels" : [],
}
samples_per_segment = int(SAMPLES_PER_TRACK/NUM_SEGMENTS)
expected_mfcc = math.ceil(samples_per_segment/HOP_LENGTH)
for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):
if dirpath is not dataset_path:
dirpath_components = dirpath.split("\\")
label = dirpath_components[-1]
data["mapping"].append(label)
print(f"Processing: {label}")
for f in filenames:
file_path = os.path.join(dirpath, f)
signal, sr = librosa.load(file_path, sr=SAMPLE_RATE)
for s in range(NUM_SEGMENTS):
start_sample = samples_per_segment * s
finish_sample = start_sample + samples_per_segment
mfcc = librosa.feature.mfcc(signal[start_sample:finish_sample], sr=sr, n_fft=N_FTT, n_mfcc=NUM_MFCC, hop_length=HOP_LENGTH)
mfcc = mfcc.T
if len(mfcc) == expected_mfcc:
data["mfcc"].append(mfcc.tolist())
data["labels"].append(i-1)
print(f"{file_path}, segment:{s+1}")
with open(JSON_PATH, "w") as fp:
json.dump(data, fp, indent=4)
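
def load_mfcc_dataset(json_path=JSON_PATH):
    """
    Hedged companion sketch (not part of the original script): load the JSON
    written above back into numpy arrays for training. The key names match
    the `data` dict built in dump_mfccs_to_json(); the function itself is an
    illustrative assumption about how a downstream consumer reads the file.
    """
    import numpy as np
    with open(json_path, "r") as fp:
        data = json.load(fp)
    X = np.array(data["mfcc"])    # (num_segments_total, frames_per_segment, NUM_MFCC)
    y = np.array(data["labels"])  # integer labels indexing into data["mapping"]
    return X, y, data["mapping"]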
if __name__ == "__main__":
dump_mfccs_to_json()
|
jmrossi98/genre_detect
|
src/preprocess_data.py
|
preprocess_data.py
|
py
| 2,051 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "settings.SAMPLE_RATE",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "settings.DURATION",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "settings.NUM_SEGMENTS",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "math.ceil",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "settings.HOP_LENGTH",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "os.walk",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "librosa.load",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "settings.SAMPLE_RATE",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "settings.NUM_SEGMENTS",
"line_number": 43,
"usage_type": "argument"
},
{
"api_name": "librosa.feature.mfcc",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "librosa.feature",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "settings.N_FTT",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "settings.NUM_MFCC",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "settings.HOP_LENGTH",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 54,
"usage_type": "call"
}
] |
12423871357
|
__author__ = "Vanessa Sochat, Alec Scott"
__copyright__ = "Copyright 2021-2023, Vanessa Sochat and Alec Scott"
__license__ = "Apache-2.0"
import json
import os
import re
import shlex
import subprocess
import pakages.builders.spack.cache as spack_cache
import pakages.client
import pakages.oras
import pakages.utils
from pakages.logger import logger
class SpackClient(pakages.client.PakagesClient):
"""
Pakages has a main controller for interacting with pakages.
"""
def parse_package_request(self, packages):
"""
Parse the packages and repo (if any) from it.
This is shared between install and build
"""
        # By default, assume we are not adding a repository
repo = None
if not isinstance(packages, list):
packages = shlex.split(packages)
# Case 1: we have an install directed at the present working directory
if packages and packages[0] == ".":
repo = os.getcwd()
packages.pop(0)
# If we have a path (akin to the first)
if packages and os.path.exists(packages[0]):
repo = packages.pop(0)
        # OR if we have a GitHub URI (TODO: could clone it here)
if packages and re.search("(http|https)://github.com", packages[0]):
repo = packages.pop(0)
# If we don't have packages and we have a repo, derive from PWD
if repo and not packages:
for path in pakages.utils.recursive_find(repo, "package.py"):
packages.append(os.path.basename(os.path.dirname(path)))
# Finally, add the repository
if repo:
self.add_repository(repo)
return packages
def list_installed(self):
"""
List installed packages
"""
command = ["spack", "find"]
for line in pakages.utils.stream_command(command):
print(line.strip("\n"))
command = ["spack", "find", "--json"]
result = pakages.utils.run_command(command)
return json.loads(result["message"])
def build(self, packages, cache_dir=None, key=None, **kwargs):
"""
Build a package into a cache
"""
packages = self.parse_packages(packages)
# Prepare a cache directory
cache = spack_cache.BuildCache(
spec_name=packages,
cache_dir=cache_dir or self.settings.cache_dir,
username=self.settings.username,
email=self.settings.email,
settings=self.settings,
)
# Install all packages
self._install(packages)
cache.create(packages, key=key)
# Push function is on cache, if desired
return cache
def parse_packages(self, packages):
"""
Helper function to ensure we return consistent names.
"""
packages = self.parse_package_request(packages)
if isinstance(packages, list):
packages = packages[0]
if " " in packages:
logger.exit("We currently only support one package for build.")
logger.info(f"Preparing package {packages}")
return packages
def add_repository(self, path):
"""
Add a repository.
Given a path that exists, add the repository to the
underlying spack. If you need to add a GitHub uri, create a
pakages.repo.PakRepo first.
"""
try:
command = ["spack", "repo", "add", path]
for line in pakages.utils.stream_command(command):
logger.info(line.strip("\n"))
except subprocess.CalledProcessError as e:
if "Repository is already registered" in e.output:
pass
else:
raise e
def download_cache(self, target, download_dir=None):
"""
Download a target to a cache download directory
"""
download_dir = download_dir or pakages.utils.get_tmpdir()
reg = pakages.oras.get_oras_client()
# This will error if not successful, result is a list of files
reg.pull(target=target, outdir=download_dir)
return download_dir
def install(self, packages, **kwargs):
"""
Install one or more packages.
"""
packages = self.parse_packages(packages)
use_cache = kwargs.get("use_cache", False)
if use_cache:
cache_dir = self.download_cache(use_cache)
cache = spack_cache.BuildCache(
packages, cache_dir=cache_dir, settings=self.settings
)
# Cache is named after target, this is a filesystem mirror
cache.add_as_mirror(re.sub("(-|:|/)", "-", use_cache))
# Prepare install command with or without cache
command = ["spack", "install"]
if use_cache:
command.append("--use-cache")
if isinstance(packages, list):
command.append(" ".join(packages))
else:
command.append(packages)
# Install packages using system spack - we aren't responsible for this working
for line in pakages.utils.stream_command(command):
logger.info(line.strip("\n"))
def _install(self, packages):
"""
Install one or more packages.
        This eventually needs to take into account using the GitHub packages build cache
"""
# Install packages using system spack - we aren't responsible for this working
for line in pakages.utils.stream_command(["spack", "install", packages]):
logger.info(line.strip("\n"))
def uninstall(self, packages):
"""
Uninstall a spack package
"""
for line in pakages.utils.stream_command(["spack", "uninstall", packages]):
logger.info(line.strip("\n"))
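
# --- Hedged sketch (an assumption, not the real implementation) -------------
# This client leans on pakages.utils.stream_command as a generator of output
# lines whose failures surface as CalledProcessError with a readable .output
# (see the "Repository is already registered" handling above). A minimal
# stand-in consistent with those call sites could look like:
import subprocess

def _stream_command_sketch(command):
    proc = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
    )
    captured = []
    for line in proc.stdout:
        captured.append(line)
        yield line
    proc.wait()
    if proc.returncode != 0:
        raise subprocess.CalledProcessError(
            proc.returncode, command, output="".join(captured)
        )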
|
syspack/pakages
|
pakages/builders/spack/client.py
|
client.py
|
py
| 5,794 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "pakages.builders.spack.cache.client",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pakages.builders.spack.cache",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "shlex.split",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache.utils.recursive_find",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache.utils",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "pakages.builders.spack.cache",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache.utils.stream_command",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache.utils",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "pakages.builders.spack.cache",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "pakages.builders.spack.cache.utils.run_command",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache.utils",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "pakages.builders.spack.cache",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache.BuildCache",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "pakages.logger.logger.exit",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pakages.logger.logger",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "pakages.logger.logger.info",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "pakages.logger.logger",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "pakages.builders.spack.cache.utils.stream_command",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache.utils",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "pakages.builders.spack.cache",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "pakages.logger.logger.info",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "pakages.logger.logger",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "pakages.builders.spack.cache.utils.get_tmpdir",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache.utils",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "pakages.builders.spack.cache",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "pakages.builders.spack.cache.oras.get_oras_client",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache.oras",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "pakages.builders.spack.cache",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "pakages.builders.spack.cache.BuildCache",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache.utils.stream_command",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache.utils",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "pakages.builders.spack.cache",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "pakages.logger.logger.info",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "pakages.logger.logger",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "pakages.builders.spack.cache.utils.stream_command",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache.utils",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "pakages.builders.spack.cache",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "pakages.logger.logger.info",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "pakages.logger.logger",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "pakages.builders.spack.cache.utils.stream_command",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "pakages.builders.spack.cache.utils",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "pakages.builders.spack.cache",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "pakages.logger.logger.info",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "pakages.logger.logger",
"line_number": 176,
"usage_type": "name"
}
] |
42111163390
|
from fastapi import Body, FastAPI
from pydantic import BaseModel
from typing import Annotated
from enum import Enum
app = FastAPI()
class ModelName(str, Enum):
afs = "afs"
har = "har1"
class Item(BaseModel):
name: str
description: str | None = None
price: float
tax: float | None = None
tags: set[str] = set()
fake_items_db = [{"item_name": "Foo"}, {"item_name": "Bar"}, {"item_name": "Baz"}]
@app.post("/items/create_item/")
async def create_items(item: Item):
item_dict = item.model_dump()
if item.tax:
price_with_tax = item.price + item.tax
item_dict.update({"price with tax": price_with_tax})
return item_dict
@app.get("/")
async def home():
return {"Data": "Test"}
@app.get("/items/")
async def read_item(skip: int = 0, limit: int = 10):
return fake_items_db[skip: skip + limit]
@app.put("/add_items/{item_id}")
async def add_item(item_id: int, item: Item):
return {"item_id": item_id, **item.model_dump()}
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Annotated[Item, Body(examples=[{"name": "foo", "description": "cool item", "price": 24, "tax": 3}])]):
result = {"item_id": item_id, "item": item}
return result
@app.get("/models/{model_name}")
async def get_model(model_name: ModelName):
if model_name is ModelName.afs:
return {"model_name": model_name, "message": 1}
    if model_name is ModelName.har:  # the old check `model_name.value == "har"` could never match ("har1")
return {"model_name": model_name, "message": 2}
return {"model_name": model_name, "message": -1}
@app.get("/files/{file_path:path}")
async def read_file(file_path: str):
return {"file_path": file_path}
|
mkilic20/task
|
testing.py
|
testing.py
|
py
| 1,663 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.FastAPI",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Annotated",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "fastapi.Body",
"line_number": 51,
"usage_type": "call"
}
] |
11332000472
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 1 10:10:45 2021
@author: 82106
"""
import cv2
import os
import sys
if not os.path.exists('result'):
os.makedirs('result')
capture = cv2.VideoCapture(1)
if not capture.isOpened():
print('Camera open failed!')
sys.exit()
'''
frameWidth = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
frameSize = (frameWidth, frameHeight)
print('frame size : {}'.format(frameSize))
'''
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
count = 1
while True:
ret, frame = capture.read()
if not ret:
print('Frame read error!')
sys.exit()
cv2.imshow('frame', frame)
key = cv2.waitKey(1)
if key == ord('s'):
print('Screenshot saved!')
cv2.imwrite('result/screenshot{}.png'.format(count), frame, params=[cv2.IMWRITE_PNG_COMPRESSION, 0])
count += 1
elif key == ord('q'):
break
capture.release()
cv2.destroyAllWindows()
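
# Hedged variant (not in the original script): `count` restarts at 1 on every
# run, so result/screenshot1.png from a previous session gets overwritten.
# A timestamped name avoids collisions between runs, e.g.:
#
#   from datetime import datetime
#   name = datetime.now().strftime('result/screenshot_%Y%m%d_%H%M%S.png')
#   cv2.imwrite(name, frame, params=[cv2.IMWRITE_PNG_COMPRESSION, 0])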
|
dongwooky/Personal-Project
|
container/camera_screenshot.py
|
camera_screenshot.py
|
py
| 1,084 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WIDTH",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_HEIGHT",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cv2.IMWRITE_PNG_COMPRESSION",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 52,
"usage_type": "call"
}
] |
44602770515
|
import pytesseract
import PIL
from os import system
import re
system("tesseract -l")
class workout:
reps = 0
exercise_name = ""
def compile_text_to_workouts(text):
    # Walk the token list with a single index: a digit starts a new workout,
    # and the following non-digit tokens form its exercise name. (The original
    # mixed a for-loop over all words with a separate index, which fell out of
    # sync whenever the text did not strictly alternate reps and names.)
    workouts = []
    num = 0
    while num < len(text):
        if not text[num].isdigit():
            num += 1
            continue
        new_workout = workout()
        new_workout.reps = text[num]
        num += 1
        while num < len(text) and not text[num].isdigit():
            new_workout.exercise_name += " " + str(text[num])
            num += 1
        new_workout.exercise_name = new_workout.exercise_name.strip()
        workouts.append(new_workout)
    return workouts
####MAIN:###############################################################
letters = (pytesseract.image_to_string(r'../GetFit/workout_routine1.png'))
print(letters)
sentence = re.findall(r'\w+', letters) ##turns letters into words and makes list
print(sentence)
workouts = compile_text_to_workouts(sentence) ###turns tokens into actual workout routine
for w in workouts:
    print(w.reps, w.exercise_name)
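# Hedged self-check (not in the original): the parser can be exercised without
# an image by feeding a token list directly, mirroring what re.findall
# produces from the OCR text above.
sample = ['10', 'push', 'ups', '15', 'squats', '20', 'sit', 'ups']
for w in compile_text_to_workouts(sample):
    print(w.reps, w.exercise_name)  # -> "10 push ups", "15 squats", "20 sit ups"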
|
reeyagup/GetFit
|
image_to_text.py
|
image_to_text.py
|
py
| 972 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.system",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pytesseract.image_to_string",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 35,
"usage_type": "call"
}
] |
19040286888
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from rl_nav import constants
from rl_nav.environments import wrapper
try:
import cv2
import matplotlib
from matplotlib import cm
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ModuleNotFoundError:
raise AssertionError(
"To use visualisation wrapper, further package requirements "
"need to be satisfied. Please consult README."
)
class VisualisationEnv(wrapper.Wrapper):
COLORMAP = cm.get_cmap("plasma")
NORMALISE = False
def __init__(self, env):
super().__init__(env=env)
def render(
self,
save_path: Optional[str] = None,
dpi: Optional[int] = 60,
format: str = "state",
) -> None:
"""Method to render environment.
Args:
save_path: optional path to which to save image.
dpi: optional pixel.
format: state of environment to render.
"""
if format == constants.STATE:
            # The original left two string literals dangling after the assert;
            # they were no-op expression statements, not part of the message.
            assert self._env.active, (
                "To render map with state, environment must be active. "
                "Call reset_environment() to reset the environment and make it active. "
                "Else render the stationary environment skeleton using format='stationary'."
            )
if save_path:
fig = plt.figure()
plt.imshow(
self._env._env_skeleton(
rewards=format,
agent=format,
),
origin="lower",
)
fig.savefig(save_path, dpi=dpi)
else:
plt.imshow(
self._env._env_skeleton(
rewards=format,
agent=format,
),
origin="lower",
)
def visualise_episode_history(
self, save_path: str, history: Union[str, List[np.ndarray]] = "train"
) -> None:
"""Produce video of episode history.
Args:
save_path: name of file to be saved.
history: "train", "test" to plot train or test history,
else provide an independent history.
"""
if isinstance(history, str):
if history == constants.TRAIN:
history = self._env.train_episode_history
elif history == constants.TEST:
history = self._env.test_episode_history
elif history == constants.TRAIN_PARTIAL:
history = self._env.train_episode_partial_history
elif history == constants.TEST_PARTIAL:
history = self._env.test_episode_partial_history
SCALING = 20
FPS = 30
map_shape = history[0].shape
frameSize = (SCALING * map_shape[1], SCALING * map_shape[0])
out = cv2.VideoWriter(
filename=save_path,
fourcc=cv2.VideoWriter_fourcc("m", "p", "4", "v"),
fps=FPS,
frameSize=frameSize,
)
for frame in history:
bgr_frame = frame[..., ::-1].copy()
flipped_frame = np.flip(bgr_frame, 0)
scaled_up_frame = np.kron(flipped_frame, np.ones((SCALING, SCALING, 1)))
out.write((scaled_up_frame * 255).astype(np.uint8))
out.release()
def _plot_normalised_heatmap_over_env(
self, heatmap: Dict[Tuple[int, int], float], save_name: str
):
split_save_name = save_name.split(".pdf")[0]
save_name = f"{split_save_name}_normalised.pdf"
environment_map = self._env._env_skeleton(
rewards=None,
agent=None,
)
all_values = list(heatmap.values())
current_max_value = np.max(all_values)
current_min_value = np.min(all_values)
for position, value in heatmap.items():
# remove alpha from rgba in colormap return
# normalise value for color mapping
environment_map[position[::-1]] = self.COLORMAP(
(value - current_min_value) / (current_max_value - current_min_value)
)[:-1]
fig = plt.figure()
plt.imshow(environment_map, origin="lower", cmap=self.COLORMAP)
plt.colorbar()
fig.savefig(save_name, dpi=60)
plt.close()
def _plot_unnormalised_heatmap_over_env(
self, heatmap: Dict[Tuple[int, int], float], save_name: str
):
environment_map = self._env._env_skeleton(
rewards=None,
agent=None,
)
for position, value in heatmap.items():
# remove alpha from rgba in colormap return
environment_map[position[::-1]] = self.COLORMAP(value)[:-1]
fig = plt.figure()
plt.imshow(environment_map, origin="lower", cmap=self.COLORMAP)
plt.colorbar()
fig.savefig(save_name, dpi=60)
plt.close()
def plot_heatmap_over_env(
self,
heatmap: Dict[Tuple[int, int], float],
save_name: str,
) -> None:
"""plot quantities over top of environmen (e.g. value function)
Args:
heatmap: data to plot; dictionary of states (keys) and quantities (values).
fig: figure on which to plot.
ax: axis on which to plot.
save_name: path to which to save plot.
"""
self._plot_unnormalised_heatmap_over_env(heatmap=heatmap, save_name=save_name)
self._plot_normalised_heatmap_over_env(heatmap=heatmap, save_name=save_name)
def plot_numbered_values_over_env(
self, values: Dict[Tuple[int], np.ndarray], save_name: str
):
fig = plt.figure()
environment_map = self._env._env_skeleton(
rewards=None,
agent=None,
)
plt.imshow(environment_map, origin="lower", cmap=self.COLORMAP)
all_states = list(values.keys())
for state, action_values in values.items():
for i, action_value in enumerate(action_values):
if all_states[i] != state:
xytext = np.array(state) + 0.2 * (
np.array(all_states[i]) - np.array(state)
)
plt.annotate(
f"{i}: {round(action_value, 2)}",
xy=state,
xytext=xytext,
arrowprops={
"headlength": 2,
"headwidth": 2,
"width": 0.5,
"linewidth": 0.1,
},
color="y",
size=5,
)
else:
plt.annotate(
i,
xy=state,
color="g",
size=5,
)
fig.savefig(save_name, dpi=60)
plt.close()
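
if __name__ == "__main__":
    # Hedged demo (not part of the wrapper): the colour mapping used by
    # _plot_normalised_heatmap_over_env in isolation, i.e. min-max scale the
    # values, look them up in COLORMAP, and drop the alpha channel.
    vals = {(0, 0): 1.0, (1, 0): 3.0, (2, 0): 5.0}
    lo, hi = min(vals.values()), max(vals.values())
    rgb = {
        pos: VisualisationEnv.COLORMAP((v - lo) / (hi - lo))[:-1]
        for pos, v in vals.items()
    }
    print(rgb)  # RGB triples in [0, 1], as written into the env skeleton image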
|
philshams/Euclidean_Gridworld_RL
|
rl_nav/environments/visualisation_env.py
|
visualisation_env.py
|
py
| 6,969 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "rl_nav.environments.wrapper.Wrapper",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "rl_nav.environments.wrapper",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.cm.get_cmap",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.cm",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "rl_nav.constants.STATE",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "rl_nav.constants",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "rl_nav.constants.TRAIN",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "rl_nav.constants",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "rl_nav.constants.TEST",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "rl_nav.constants",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "rl_nav.constants.TRAIN_PARTIAL",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "rl_nav.constants",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "rl_nav.constants.TEST_PARTIAL",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "rl_nav.constants",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "cv2.VideoWriter",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter_fourcc",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.flip",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.kron",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "numpy.max",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.annotate",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.annotate",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 205,
"usage_type": "name"
}
] |
20093575148
|
# General
import os
import warnings  # used by netgraph_community_layout below
# Tools/utils
import itertools
import multiprocessing
from tqdm.notebook import tqdm
from tqdm import tqdm as tqdm_cli
from functools import reduce # for aggregate functions
from itertools import chain # for aggregate functions
# Data management
import math
import numpy as np
import pandas as pd
import networkx as nx
import igraph as ig
import leidenalg as la
from community import community_louvain
# Visualization
import matplotlib.pyplot as plt
import seaborn as sns
import pygraphviz as pgv
import colorcet as cc
from matplotlib.colors import ListedColormap
from wordcloud import WordCloud, STOPWORDS
from termcolor import colored # colored text output
from sklearn.preprocessing import MinMaxScaler
stopwords = STOPWORDS.union({
'regulation', 'activity', 'positive', 'negative', 'catabolic', 'process', 'protein', 'complex',
'binding', 'response', 'gene', 'genes', 'encoding', 'defining', 'GeneID', 'regulated',
})
def get_tf_targ_ctx(df):
tf_target_dict = {'TF': [], 'target': [], 'importance': []}
tf_target_info = (
df.droplevel(axis=0, level=1).droplevel(axis=1, level=0)['TargetGenes']
.map(set) # transform each list into set
.groupby('TF').agg(lambda x: reduce(lambda a, b: a.union(b), x)) # combine all targets per TF
)
    for tf, target_info in tf_target_info.items():  # Series.iteritems() was removed in pandas 2.0
tf_target_dict['TF'] += [tf for target_name, score in target_info]
tf_target_dict['target'] += [target_name for target_name, score in target_info]
tf_target_dict['importance'] += [score for target_name, score in target_info]
return pd.DataFrame(tf_target_dict)
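
# Hedged illustration (an assumption about the expected input, which looks
# like pySCENIC's ctx output): rows indexed by (TF, motif), a column
# MultiIndex whose second level contains 'TargetGenes', and each cell a list
# of (target, importance) tuples.
#
#   cell = [('GeneA', 1.5), ('GeneB', 0.7)]
#   df = pd.DataFrame(
#       {('Enrichment', 'TargetGenes'): [cell]},
#       index=pd.MultiIndex.from_tuples([('TF1', 'motif_1')], names=['TF', 'MotifID']),
#   )
#   get_tf_targ_ctx(df)  # -> rows: TF1/GeneA/1.5 and TF1/GeneB/0.7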
def netgraph_community_layout(G, node_to_community, community_scale=1., node_scale=2., seed=42):
"""
Compute the node positions for a modular graph.
"""
# assert that there multiple communities in the graph; otherwise abort
communities = set(node_to_community.values())
if len(communities) < 2:
warnings.warn("Graph contains a single community. Unable to compute a community layout. Computing spring layout instead.")
        return nx.spring_layout(G, weight='importance', seed=seed)  # this signature takes no **kwargs
community_size = _get_community_sizes(node_to_community)
community_centroids = _get_community_positions(G, node_to_community, community_scale, seed=seed)
relative_node_positions = _get_node_positions(G, node_to_community, node_scale, seed=seed)
# combine positions
node_positions = dict()
for node, community in node_to_community.items():
xy = community_centroids[node]
delta = relative_node_positions[node] * community_size[community]
node_positions[node] = xy + delta
return node_positions
def _get_community_sizes(node_to_community):
"""
Compute the area of the canvas reserved for each community.
"""
def _invert_dict(mydict):
"""Invert a dictionary such that values map to keys."""
inverse = dict()
for key, value in mydict.items():
inverse.setdefault(value, set()).add(key)
return inverse
scale = (1, 1)
total_nodes = len(node_to_community)
max_radius = np.linalg.norm(scale) / 2
scalar = max_radius / total_nodes
community_to_nodes = _invert_dict(node_to_community)
community_size = {community : len(nodes) * scalar for community, nodes in community_to_nodes.items()}
return community_size
def _get_community_positions(G, node_to_community, community_scale, seed, simple=True):
"""
Compute a centroid position for each community.
"""
# create a weighted graph, in which each node corresponds to a community,
# and each edge weight to the number of edges between communities
between_community_edges = _find_between_community_edges(G, node_to_community)
communities = set(node_to_community.values())
hypergraph = nx.DiGraph()
hypergraph.add_nodes_from(communities)
if not simple:
for (ci, cj), edges in between_community_edges.items():
hypergraph.add_edge(ci, cj, weight=len(edges))
# find layout for communities
pos_communities = nx.spring_layout(hypergraph, scale=community_scale, seed=seed)
# set node positions to position of community
pos = dict()
for node, community in node_to_community.items():
pos[node] = pos_communities[community]
return pos
def _find_between_community_edges(G, node_to_community, fixed_community=None):
"""Convert the graph into a weighted network of communities."""
edges = dict()
for (ni, nj) in G.edges():
ci = node_to_community[ni]
cj = node_to_community[nj]
if fixed_community is not None:
if fixed_community != ci and fixed_community != cj:
continue
if ci != cj:
try:
edges[(ci, cj)] += [(ni, nj)]
except KeyError:
edges[(ci, cj)] = [(ni, nj)]
return edges
def _get_node_positions(G, node_to_community, node_scale, seed):
"""
Positions nodes within communities.
"""
communities = dict()
for node, community in node_to_community.items():
try:
communities[community] += [node]
except KeyError:
communities[community] = [node]
pos = dict()
for ci, nodes in communities.items():
subgraph = G.subgraph(nodes)
pos_subgraph = nx.spring_layout(subgraph, weight='importance', scale=node_scale, seed=seed)
pos.update(pos_subgraph)
return pos
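
# Hedged usage sketch (illustrative): the three helpers above combine inside
# netgraph_community_layout; on a toy graph with a Louvain partition:
#
#   G = nx.karate_club_graph()
#   nx.set_edge_attributes(G, 1.0, 'importance')
#   part = community_louvain.best_partition(G)
#   pos = netgraph_community_layout(G, part, seed=0)
#   nx.draw(G, pos, node_color=list(part.values()), node_size=40)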
def squeeze_graph(G, partition, approximate_size=4000):
"""
Squeeze graph by picking only top nodes (according to number of connections) in each partition. This
step is needed to speed up the networkx visualization and show only the general POV on the graph.
"""
#### STEP 1 - filtering nodes
# Getting the number of partitions
num_partitions = len(set(partition.values()))
# Getting partition parameters
partition_sizes = {i: len([1 for node, k in partition.items() if k == i]) for i in range(num_partitions)}
min_partition_size = min(partition_sizes.values())
# Normalizing partition size: divide each partition size by the minimal partition size
normalized_partition_size = {i: (size // min_partition_size) for i, size in partition_sizes.items()}
# Getting scale factor - to get approximately size of the graph close to approximate_size
scale_factor = math.ceil(approximate_size / sum(normalized_partition_size.values()))
squeezed_partition = {i: (size * scale_factor) for i, size in normalized_partition_size.items()}
top_nodes = []
for i, num_nodes in squeezed_partition.items():
# Getting partition graph
partition_i = G.subgraph([node for node, k in partition.items() if k == i])
# Finding inter-community edges
intercommunity_edges = _find_between_community_edges(G, partition, i)
# Calculating node importance according to number of inter-community edges
node_importance = {}
for (part_1, part_2), edges in intercommunity_edges.items():
for node_1, node_2 in edges:
curr_node = node_1 if part_1 == i else node_2
if curr_node in node_importance:
node_importance[curr_node] += 1
else:
node_importance[curr_node] = 1
# Getting top nodes in the partition according to maximum number of inter-community edge (node_importance)
top_nodes += list(dict(sorted(node_importance.items(), key=lambda x: x[1], reverse=True)[:squeezed_partition[i]]).keys())
filtered_partition = {node: i for node, i in partition.items() if node in top_nodes}
filtered_G = G.subgraph(top_nodes)
#### STEP 2 - filtering edges
# Setting up the size of the squeezed graph (number of edges)
keep_num_edges = 20000
edges_to_keep = \
list(
dict(
sorted(
{
(st, end): data['importance'] for st, end, data in filtered_G.edges(data=True)
}.items(), key=lambda x: x[1], reverse=True)[:keep_num_edges]
).keys()
)
squeezed_G = filtered_G.edge_subgraph(edges_to_keep)
squeezed_partition = {node: i for node, i in filtered_partition.items() if node in squeezed_G.nodes()}
return squeezed_G, squeezed_partition
def get_elipsis_mask():
h, w = 600, 800
center = (int(w/2), int(h/2))
radius_x = w // 2
radius_y = h // 2
Y, X = np.ogrid[:h, :w]
mask = ((X - center[0])**2/radius_x**2 + (Y - center[1])**2/radius_y**2 >= 1)*255
return mask
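
# Hedged note: the mask is 255 outside an ellipse inscribed in an 800x600
# canvas and 0 inside; WordCloud treats 255-valued mask pixels as "masked
# out", so words are laid out inside the ellipse. Quick check:
#
#   m = get_elipsis_mask()
#   m.shape       # (600, 800)
#   m[300, 400]   # 0   (centre, inside the ellipse)
#   m[0, 0]       # 255 (corner, outside)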
def plot_cloud(G, partition, squeezed_pos, ax, anno_db, filter_genes=True,
limit_anno_until=50, display_func=False, if_betweenness=True,
k=3000):
"""
Plot word cloud that indicates the function(s) of each gene cluster.
"""
# Loading the gene functional annotation
gene_func = load_gene_func_db(anno_db, reload=False, as_series=True)
# Reversing partition dict -> {group_1: [gene_1, gene_2, ...], group_2: [gene_3, gene_4, ...], ...}
partition_genes_ = {}
for gene, i in partition.items():
if i not in partition_genes_.keys():
partition_genes_[i] = [gene]
else:
partition_genes_[i] += [gene]
# If display gene function in the word clouds
if display_func:
# Whether to filter the genes on which we compute the word cloud (most important genes)
if filter_genes:
compute_centrality = nx.betweenness_centrality if if_betweenness else nx.closeness_centrality
distance_metric = {'weight': 'distance'} if if_betweenness else {'distance': 'distance'}
partition_genes = {}
t = tqdm(partition_genes_.items())
for i, genes in t:
t.set_description(f'Processing cluster {i}, size={G.subgraph(genes).order()}')
top_len = min(limit_anno_until, len(genes))
top_gene_scores = dict(
sorted(
compute_centrality(
G.subgraph(genes), k=min(G.subgraph(genes).order(), k), **distance_metric
).items(),
key=lambda x: x[1], reverse=True
)[:top_len]
)
# Renormalizing centrality scores between 1 and 100, and rounding them to use later when
# displaying wordclouds (higher score - higher "frequency" or word size)
norm_top_gene_scores = dict(
zip(
top_gene_scores.keys(), list(map(lambda x: int(x), scale(list(top_gene_scores.values()), 1, 100)))
)
)
partition_genes[i] = norm_top_gene_scores
print('Filtered genes for generating the function word cloud..')
else:
            # The original built a set of dicts here, which raises TypeError
            # (dicts are unhashable); a dict comprehension keyed by cluster is intended.
            partition_genes = {i: {gene_: 1 for gene_ in gene_list} for i, gene_list in partition_genes_.items()}
# Computing functional annotation for each cluster as a concatenated list of annotations
# Each annotation is weighted by its duplication gene_score times (e.g. a gene has score = 2 ->
# the functional annotation is duplicated and have bigger font in WordCloud)
partition_funcs = {
i: ' '.join(
chain.from_iterable([
gene_func[gene_func.index == gene].to_list()*gene_score
for gene, gene_score in gene_score_list.items()
])) for i, gene_score_list in partition_genes.items()
}
# Generating word counts from aggregated gene annotation texts -> obtaining main (most frequent) function tokens
word_counts = {i: WordCloud(max_words=30, min_font_size=15, stopwords=stopwords).process_text(text) for i, text in partition_funcs.items()}
word_counts = {
i: (freqs if freqs else {'no found function': 1}) for i, freqs in word_counts.items()
} # dealing with no word case
wordclouds = {
i: WordCloud(
max_words=30, min_font_size=15, stopwords=stopwords, background_color='white', mask=get_elipsis_mask()
).generate_from_frequencies(freqs) for i, freqs in word_counts.items()
}
# Display main genes in decreasing order of importance (top `top_len` genes)
else:
compute_centrality = nx.betweenness_centrality if if_betweenness else nx.closeness_centrality
distance_metric = {'weight': 'distance'} if if_betweenness else {'distance': 'distance'}
partition_genes = {}
t = tqdm(partition_genes_.items())
for i, genes in t:
t.set_description(f'Processing cluster {i}, size={G.subgraph(genes).order()}')
top_len = min(limit_anno_until, len(genes))
top_gene_scores = dict(
sorted(
compute_centrality(
G.subgraph(genes), k=min(G.subgraph(genes).order(), k), **distance_metric
).items(),
key=lambda x: x[1], reverse=True
)[:top_len]
)
# Renormalizing centrality scores between 1 and 100, and rounding them to use later when
# displaying wordclouds (higher score - higher "frequency" or word size)
norm_top_gene_scores = dict(
zip(
top_gene_scores.keys(), list(map(lambda x: int(x), scale(list(top_gene_scores.values()), 1, 100)))
)
)
partition_genes[i] = norm_top_gene_scores
print('Obtained top genes for generating the gene word cloud..')
wordclouds = {
i: WordCloud(
max_words=30, min_font_size=15, background_color='white', mask=get_elipsis_mask()
).generate_from_frequencies(gene_score_dict) for i, gene_score_dict in partition_genes.items()
}
# Plotting
partition_coords = {}
for gene, coords in squeezed_pos.items():
if partition[gene] not in partition_coords:
partition_coords[partition[gene]] = [coords]
else:
partition_coords[partition[gene]] += [coords]
for i, coords in partition_coords.items():
x, y = zip(*coords)
min_x, max_x = min(x), max(x)
min_y, max_y = min(y), max(y)
ax.imshow(wordclouds[i], interpolation='bilinear', extent=[min_x, max_x, min_y, max_y])
return ax
def process_communities(data, pat=None, algo='leiden', filter_quantile=0.95, if_betweenness=True,
limit_anno_until=50, k=5000, save_top_intercommunity_links_until=20,
other_functions_until=20, save_top_new_found_cluster_links=20, seed=42):
"""
Process graph by finding its communities, annotate its communities, and save everything into .tsv format.
"""
from joblib import Parallel, delayed
def highlight_TFs(word, font_size, position, orientation, font_path, random_state):
TF_color = (255, 0, 0) # red
if word in lambert_TF_names or word in dorothea_TF_names:
return TF_color
else:
r, g, b, alpha = plt.get_cmap('viridis')(font_size / 120)
return (int(r * 255), int(g * 255), int(b * 255))
print('\nPerforming community analysis..\n\n')
# Setting pathways to files
_PROJ_PATH = '/gpfs/projects/bsc08/bsc08890'
_FMETA = os.path.join(_PROJ_PATH, 'data/GSE145926_RAW/metadata.tsv')
_DATA_HOME = os.path.join(_PROJ_PATH, 'res/covid_19')
# Loading sample meta data, reordering patients
full_meta = pd.read_csv(_FMETA, sep='\t', index_col=0)
# Prepare everything to save the figs and dataframe
if data == 'all_data':
data = 'raw_data'
elif 'raw_data_' not in data:
data = f'raw_data_{data}'
else:
pass
if pat is None or pat == 'all_data':
# Cell-type aggregated data
data_folder = 'all_data' if data == 'raw_data' else data.replace('raw_data_', '')
figs_as = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'figs', 'grnboost2', f'raw_data')
data_to = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'data', 'grnboost2', f'{algo}_communities')
data_as = os.path.join(data_to, f'raw_data_communities_info.pickle')
elif pat in ['C', 'M', 'S']:
# Patient-type aggregated data
data_folder = 'all_data' if data == 'raw_data' else data.replace('raw_data_', '')
figs_as = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'figs', 'grnboost2',
f'raw_data_{pat}_type')
data_to = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'data', 'grnboost2', f'{algo}_communities')
data_as = os.path.join(data_to, f'raw_data_{pat}_type_communities_info.pickle')
else:
# Loading patient-specific data
figs_as = os.path.join(_DATA_HOME, pat, 'figs', 'grnboost2', f'{data}')
data_to = os.path.join(_DATA_HOME, pat, 'data', 'grnboost2', f'{algo}_communities')
data_as = os.path.join(data_to, f'{data}_communities_info.pickle')
os.makedirs(data_to, exist_ok=True)
os.makedirs(os.path.dirname(figs_as), exist_ok=True)
# Loading lists of TFs from Lambert 2018 and DoRothEA, in the latter case we will keep only confident regulons
lambert_TF_names = pd.read_csv(os.path.join(_PROJ_PATH, 'data/TF_lists/lambert2018.txt'), header=None)[0].to_list()
dorothea_TF_names = list(
pd.read_csv(os.path.join(_PROJ_PATH, 'data/TF_lists/dorothea_regulons.tsv'), sep='\t') \
.loc[lambda x: x['confidence'].isin(['A', 'B', 'C'])]['tf'].unique()
)
# Loading the graph
G = get_nx_graph(data=data, data_type='all', pat=pat, get_filtered=filter_quantile)
print(f"Loaded the graph: {colored('pat', 'green')}='{colored(pat, 'red')}', "
f"{colored('data', 'green')}='{colored(data, 'red')}', "
f"{colored('data_type', 'green')}='{colored('all', 'red')}'\n")
###### FINDING COMMUNITIES IN THE GRAPH #######
print('Finding communities in the graph..')
if algo == 'louvain':
partition = community_louvain.best_partition(G.to_undirected(), weight='importance', random_state=seed)
elif algo == 'leiden':
G_igraph = ig.Graph.from_networkx(G.to_undirected())
la_partition = la.find_partition(G_igraph, la.ModularityVertexPartition, weights='importance', seed=seed)
partition = {G_igraph.vs[node]['_nx_name']: i for i, cluster_nodes in enumerate(la_partition) for node in cluster_nodes}
else:
raise NotImplementedError
num_partitions = len(set(partition.values()))
print(f'Number of partitions using {algo} algorithm: {colored(num_partitions, "cyan")}\n')
###### FINDING HIGH-CENTRALITY GENES IN THE WHOLE GRAPH
print('Finding high-centrality genes in the whole graph..')
num_workers = max(multiprocessing.cpu_count() // 2, 1)
whole_G_central_genes = dict(
sorted(betweenness_centrality_parallel(G, processes=num_workers).items(), key=lambda x: x[1], reverse=True)[:limit_anno_until]
)
print(f'Computed the {"betweenness" if if_betweenness else "closeness"} centrality for all genes in the graph\n')
###### FINDING HIGH-CENTRALITY GENES AND CORRESPONDING FUNCTIONS IN EACH COMMUNITY USING GO ANNOTATION ######
print('Finding high-centrality genes/functions in each cluster..')
# Loading the gene functional annotation
anno_db_tags = ['GO', 'KEGG', 'immunological', 'hallmark']
gene_func_dbs = {tag: load_gene_func_db(tag, as_series=True) for tag in anno_db_tags}
# Reversing partition dict -> {group_1: [gene_1, gene_2, ...], group_2: [gene_3, gene_4, ...], ...}
partition_genes_ = {}
for gene, i in partition.items():
if i not in partition_genes_.keys():
partition_genes_[i] = [gene]
else:
partition_genes_[i] += [gene]
# Whether to filter the genes on which we compute the word cloud (most important genes)
compute_centrality = nx.betweenness_centrality if if_betweenness else nx.closeness_centrality
distance_metric = {'weight': 'distance'} if if_betweenness else {'distance': 'distance'}
all_partition_genes = {}
norm_partition_genes = {}
t = tqdm_cli(partition_genes_.items(), ascii=True)
for i, genes in t:
t.set_description(f'Processing cluster {i}, size={G.subgraph(genes).order()}')
gene_scores = dict(
sorted(
compute_centrality(
G.subgraph(genes), k=min(G.subgraph(genes).order(), k), normalized=True, **distance_metric
).items(),
key=lambda x: x[1], reverse=True
)
)
all_partition_genes[i] = gene_scores
central_gene_scores = {gene: gene_scores[gene] for k, gene in enumerate(gene_scores.keys()) if k < limit_anno_until}
# Renormalizing centrality scores between 1 and 100, and rounding them to use later when
# displaying wordclouds (higher score - higher "frequency" or word size)
norm_partition_genes[i] = dict(
zip(
central_gene_scores.keys(),
list(map(lambda x: int(x), scale(list(central_gene_scores.values()), 1, 100)))
)
)
print('Computed centrality scores for each gene in each community\n')
print('Finding functional annotations for each cluster..')
# Computing functional annotation for each cluster as a concatenated list of annotations
# Each annotation is weighted by its duplication gene_score times (e.g. a gene has score = 2 ->
# the functional annotation is duplicated and have bigger font in WordCloud)
# We also do it for different functional annotations like GO, KEGG, Hallmark, etc..
partition_funcs = {
tag:
{
i: ' '.join(
chain.from_iterable([
gene_func[gene_func.index == gene].to_list()*gene_score
for gene, gene_score in gene_score_list.items()
])) for i, gene_score_list in norm_partition_genes.items()
} for tag, gene_func in gene_func_dbs.items()
}
print('Computed functional annotations for each cluster\n')
###### PLOTTING GENE AND FUNC COMMUNITY CLOUDS ######
print('Plotting clusters..')
# Getting positions of squeezed graph - we do not plot every gene on the figure
squeezed_G, squeezed_partition = squeeze_graph(G, partition)
print('Computed a squeezed graph representation..')
squeezed_pos = netgraph_community_layout(squeezed_G, squeezed_partition, seed=seed)
# Alternative layouts:
#   nx.nx_agraph.pygraphviz_layout(G.to_undirected(), prog="sfdp")
#   nx.spring_layout(G, seed=seed, k=0.2, iterations=20)
partition_coords = {}
for gene, coords in squeezed_pos.items():
if partition[gene] not in partition_coords:
partition_coords[partition[gene]] = [coords]
else:
partition_coords[partition[gene]] += [coords]
print('Computed node positions of the squeezed graph representation..')
cmap = ListedColormap(sns.color_palette(cc.glasbey_bw, n_colors=num_partitions).as_hex())
for plot_type in ['genes'] + list(map(lambda x: f"func_{x}", anno_db_tags)):
if plot_type.startswith('func'):
# Getting current functional annotation
curr_partition_funcs = partition_funcs[plot_type[plot_type.find('_') + 1:]]
f, ax = plt.subplots(figsize=(20, 35))
if plot_type == 'genes':
wordclouds = {
i: WordCloud(
max_words=30, min_font_size=15, background_color='white', mask=get_elipsis_mask()
).generate_from_frequencies(gene_score_dict).recolor(color_func=highlight_TFs)
for i, gene_score_dict in norm_partition_genes.items()
}
else:
word_counts = {
i: WordCloud(max_words=30, min_font_size=15, stopwords=stopwords).process_text(text) for i, text in curr_partition_funcs.items()
}
word_counts = {
i: (freqs if freqs else {'no found function': 1}) for i, freqs in word_counts.items()
} # dealing with no word case
wordclouds = {
i: WordCloud(
max_words=30, min_font_size=15, stopwords=stopwords, background_color='white', mask=get_elipsis_mask()
).generate_from_frequencies(freqs) for i, freqs in word_counts.items()
}
# Plotting clouds
for i, coords in partition_coords.items():
x, y = zip(*coords)
min_x, max_x = min(x), max(x)
min_y, max_y = min(y), max(y)
ax.imshow(wordclouds[i], interpolation='bilinear', extent=[min_x, max_x, min_y, max_y])
print(f'Finished plotting {plot_type} word cloud..')
nx.draw(squeezed_G, squeezed_pos, ax=ax, arrowstyle="->", arrowsize=20,
connectionstyle=f'arc3, rad = 0.25', edge_color='gray', width=0.4,
node_color='k', node_size=50, alpha=0.02)
nx.draw_networkx_nodes(squeezed_G, squeezed_pos, ax=ax, node_size=100,
nodelist=list(squeezed_partition.keys()),
node_color=list(squeezed_partition.values()),
cmap=cmap, alpha=0.005)
print(f'Finished plotting {plot_type} nodes..')
ax.set_title(f'Found communities ({pat}, "all", {data}), '
f'annotation - {plot_type}',
fontsize=30)
plt.axis('off')
plt.savefig(f'{figs_as}_{plot_type}.png', bbox_inches='tight', dpi=400)
print('Finished plotting..\n')
###### SAVING DATAFRAME CONTAINING INFORMATION ABOUT EACH COMMUNITY ######
def compute_community_info(i):
"""
Parallel saving of the dataframe.
"""
# Getting information for each community
genes = list(all_partition_genes[i].keys())
community_subgraph = G.subgraph(genes)
communities_i = pd.Series(dtype='object')
# Setting tqdm logs
# t.set_description(f'Saving info about {i} cluster, size={community_subgraph.order()}')
# Getting information about cluster genes
central_genes_and_scores = {
gene: all_partition_genes[i][gene] for k, gene in enumerate(genes) if k < limit_anno_until
}
non_lambert_TFs = [
f'{gene} (rank={k})' for k, gene in enumerate(central_genes_and_scores.keys(), start=1) if gene not in lambert_TF_names
]
non_dorothea_TFs = [
f'{gene} (rank={k})' for k, gene in enumerate(central_genes_and_scores.keys(), start=1) if gene not in dorothea_TF_names
]
# Filling dataframe with the information
communities_i['num_nodes'] = community_subgraph.number_of_nodes()
communities_i['num_edges'] = community_subgraph.number_of_edges()
communities_i['all_sorted_genes'] = '; '.join(
f'{gene} (score={score})' for gene, score in all_partition_genes[i].items()
)
communities_i['sorted_central_genes_scores'] = '; '.join(
f'{gene} (score={score:.2f})' for gene, score in central_genes_and_scores.items()
)
communities_i['non_lambert_2018_TF_central_genes'] = '; '.join(non_lambert_TFs)
communities_i['non_dorothea_TF_central_genes'] = '; '.join(non_dorothea_TFs)
communities_i['whole_G_central_genes_scores'] = '; '.join(
f'{gene} (score={score:.2f})' for gene, score in whole_G_central_genes.items()
)
# Filling information about newly found gene-gene links (based on absence in KEGG and Hallmark)
top_cluster_links = set()
iter_i = 0
for st, end, edge_info in sorted(community_subgraph.edges(data=True),
key=lambda t: t[2]['importance'],
reverse=True):
# If the current (reverse directed) link was not encountered previously..
if (end, st) not in [(uniq_st, uniq_end) for uniq_st, uniq_end, _ in top_cluster_links]:
top_cluster_links.add((st, end, edge_info['importance']))
iter_i += 1
if iter_i == save_top_new_found_cluster_links:
break
for anno_tag in ['KEGG', 'hallmark']:
curr_db = load_gene_func_db(anno_tag)
tmp_list = []
# Keep links whose `st` and `end` genes have non-overlapping annotations
for st, end, imp in top_cluster_links:
st_anno_IDs = set(curr_db[curr_db.index == st]['ID'])
end_anno_IDs = set(curr_db[curr_db.index == end]['ID'])
if len(st_anno_IDs.intersection(end_anno_IDs)) == 0 and \
(len(st_anno_IDs) != 0 or len(end_anno_IDs) != 0):
tmp_list.append(f"{st} ({' & '.join(st_anno_IDs)}) <-> {end} ({' & '.join(end_anno_IDs)})")
communities_i[f'new_gene_gene_links_{anno_tag}'] = '; '.join(tmp_list)
# Filling information about cluster functions
for tag, gene_func in gene_func_dbs.items():
curr_partition_funcs = partition_funcs[tag]
# Filling main functions - non duplicates at the top
main_functions = list(dict.fromkeys([ # dropping duplicates, but preserving order
func for gene in central_genes_and_scores.keys()
for func in gene_func[gene_func.index == gene].to_list()
]))
gene_with_main_functions = [
','.join(
gene_func[gene_func == func].loc[lambda x: x.index.isin(genes)].index.to_list()
) for func in main_functions
]
main_functions = [
f'>>> {func} <<<: {gene}' for gene, func in zip(gene_with_main_functions, main_functions)
]
communities_i[f'main_functions_{tag}'] = '; '.join(main_functions) # saving..
# Saving functions corresponding to each gene
central_functions_per_gene = [
f">>> {gene} <<<: {' & '.join(gene_func[gene_func.index == gene].to_list())}" for gene in central_genes_and_scores.keys()
]
communities_i[f'sorted_central_functions_{tag}'] = '; '.join(central_functions_per_gene) # saving..
# Saving most frequent function words
freq_words = WordCloud(
max_words=30, min_font_size=15, stopwords=stopwords
).process_text(curr_partition_funcs[i])
freq_words = dict(
sorted(freq_words.items(), key=lambda x: x[1], reverse=True)
) if freq_words else {'no found function': 1} # dealing with no word case
communities_i[f'most_frequent_function_words_{tag}'] = '; '.join(freq_words.keys()) # saving
# Saving other functions present in this cluster
other_functions = list(dict.fromkeys([ # dropping duplicates, but preserving order
func for gene in genes if gene not in central_genes_and_scores.keys()
for func in gene_func[gene_func.index == gene].to_list() if func not in main_functions
]))[:other_functions_until]
genes_with_other_functions = [
','.join(
gene_func[gene_func == func].loc[lambda x: x.index.isin(genes)].index.to_list()
) for func in other_functions
]
other_functions = [
f'>>> {func} <<<: {gene}' for gene, func in zip(genes_with_other_functions, other_functions)
]
communities_i[f'other_functions_{tag}'] = '; '.join(other_functions) # saving
# Filling information about top inter-community links
# t_sub = tqdm(range(num_partitions), ascii=True, leave=False)
for k in range(num_partitions): # t_sub:
# t_sub.set_description(f'Extracting top inter-community links with {k}')
if i != k:
genes_in_k = list(all_partition_genes[k].keys())
# Getting the subgraph that contains central genes in community_i and all genes in community_k
G_central_i_k = G.subgraph(list(central_genes_and_scores.keys()) + genes_in_k)
# Getting the subgraph that contains all genes from community_i and community_k
G_i_k = G.subgraph(genes + genes_in_k)
# Creating two helper sets that allow us to keep only unique links
links_central_i_k = set()
links_i_k = set()
iter_i = 0
# Extracting top links from the first subgraph (central genes of i plus community k)
for st, end, edge_info in sorted(G_central_i_k.edges(data=True),
key=lambda t: t[2]['importance'],
reverse=True):
# If the current (reverse directed) link was not encountered previously..
if (end, st) not in [(uniq_st, uniq_end) for uniq_st, uniq_end, _ in links_central_i_k] and \
((st in genes and end not in genes) or (end in genes and st not in genes)):  # keep only links that cross communities
links_central_i_k.add((st, end, edge_info['importance']))
iter_i += 1
if iter_i == save_top_intercommunity_links_until:
break
iter_i = 0
# Extracting top links from the second subgraph (all genes of i plus community k)
for st, end, edge_info in sorted(G_i_k.edges(data=True),
key=lambda t: t[2]['importance'],
reverse=True):
# If the current (reverse directed) link was not encountered previously..
if (end, st) not in [(uniq_st, uniq_end) for uniq_st, uniq_end, _ in links_i_k] and \
((st in genes and end not in genes) or (end in genes and st not in genes)):  # keep only links that cross communities
links_i_k.add((st, end, edge_info['importance']))
iter_i += 1
if iter_i == save_top_intercommunity_links_until:
break
# Adding top links to the dataframe
communities_i[f'top_links_scores_central_genes<->community_{k}'] = \
'; '.join(f'{st} <-> {end} (score={score:.2f})' for st, end, score in links_central_i_k)
communities_i[f'top_links_scores_with_community_{k}'] = \
'; '.join([f'{st} <-> {end} (score={score:.2f})' for st, end, score in links_i_k])
return communities_i
print('Saving info dataframe..')
t = tqdm_cli(range(num_partitions), ascii=True)
# Getting dataframe
result = Parallel(n_jobs=num_workers)(delayed(compute_community_info)(i) for i in t)
communities_df = pd.concat(result, axis=1).T.reindex(
columns=[
'num_nodes', 'num_edges',
'main_functions_GO', 'main_functions_KEGG', 'main_functions_immunological', 'main_functions_hallmark',
'non_lambert_2018_TF_central_genes', 'non_dorothea_TF_central_genes',
'new_gene_gene_links_KEGG', 'new_gene_gene_links_hallmark',
'whole_G_central_genes_scores',
'other_functions_GO', 'other_functions_KEGG', 'other_functions_immunological', 'other_functions_hallmark',
'sorted_central_genes_scores',
'sorted_central_functions_GO', 'sorted_central_functions_KEGG', 'sorted_central_functions_immunological', 'sorted_central_functions_hallmark',
'most_frequent_function_words_GO', 'most_frequent_function_words_KEGG', 'most_frequent_function_words_immunological', 'most_frequent_function_words_hallmark',
'all_sorted_genes'] +
[f'top_links_scores_central_genes<->community_{i}' for i in range(num_partitions)] +
[f'top_links_scores_with_community_{i}' for i in range(num_partitions)]
)
# Saving dataframe
communities_df.to_pickle(data_as)
print(f"Saved the data to {data_as}!\n")
def run_enrichr(data, is_communities=False, is_positive_markers=True, group_types='all', on_targets=False, choose_fixed_tf=None,
                data_type='all', top_n=50, algo='leiden', enrichr_library='MSigDB_Hallmark_2020'):
"""
Run enrichment analysis with Enrichr.
"""
import json
import requests
import sys
import io
out_folder = 'community_ana' if is_communities else 'cohort_ana'
if is_communities:
print('Running EnrichR on communities..')
algo = 'leiden'
_DATA_HOME = '/gpfs/projects/bsc08/bsc08890/res/covid_19'
if data_type == 'all':
community_data = pd.read_pickle(os.path.join(
_DATA_HOME, 'cell_types', data, 'data', 'grnboost2', f'{algo}_communities',
'raw_data_communities_info.pickle'
))
else:
community_data = pd.read_pickle(os.path.join(
_DATA_HOME, 'cell_types', data, 'data', 'grnboost2', f'{algo}_communities',
f'raw_data_{data_type}_type_communities_info.pickle'
))
df = pd.concat([
pd.DataFrame({
'cluster': f'cluster_{i}',
'gene': [el[: el.find(' ')] for el in vals.split('; ')][:top_n]
}) for i, vals in community_data['all_sorted_genes'].items()  # .iteritems() was removed in pandas 2.0
], axis=0).reset_index(drop=True)
else:
if on_targets:
print('Running EnrichR on targets between 3 group types..')
types = ['C', 'M', 'S']
df = pd.concat([
pd.read_csv(
f'/gpfs/home/bsc08/bsc08890/tmp/cohort_ana/tmp_enrichr_{data}_{t}_{choose_fixed_tf}_target_list.tsv',
header=None, names=['gene']
).assign(cluster=t) for t in types
], axis=0)
else:
if group_types == 'all':
print('Running EnrichR on TFs between 3 group types..')
df = pd.read_csv(f'/gpfs/home/bsc08/bsc08890/tmp/tf_markers_df_{data}.tsv', sep='\t')
else:
print('Running EnrichR on 2 group types..')
if group_types == 'M_S':
group_types = 'S_M'
if group_types == 'C_M':
group_types = 'M_C'
if group_types == 'C_S':
group_types = 'S_C'
df_1 = pd.read_csv(f'/gpfs/home/bsc08/bsc08890/tmp/tf_markers_df_{group_types}_{data}.tsv', sep='\t')
df_1['gene'] = df_1.index
df_2 = df_1.copy()
df_2['avg_log2FC'] = - df_2['avg_log2FC']
df_1['cluster'], df_2['cluster'] = group_types.split('_')
df = pd.concat([df_1, df_2], axis=0)
if is_positive_markers:
df = df[(df['p_val_adj'] < 0.05) & (df['avg_log2FC'] > 1)]
else:
df = df[(df['p_val_adj'] < 0.05) & (df['avg_log2FC'] < -1)]
cluster_dfs = {}
for cl in df['cluster'].unique():
print(f'Processing {cl}..')
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/addList'
genes_str = '\n'.join(df[df['cluster'] == cl]['gene'])
description = f"{data}_{data_type}_{cl}"
if is_communities:
filename = f'tmp/{out_folder}/tmp_enrichr_{data}_{data_type}_{cl}.tsv'
elif on_targets:
filename = f'tmp/{out_folder}/tmp_enrichr_{data}_{data_type}_{choose_fixed_tf}_target_{cl}.tsv'
elif group_types == 'all':
filename = f'tmp/{out_folder}/tmp_enrichr_{data}_{data_type}_{cl}.tsv'
else:
filename = f'tmp/{out_folder}/tmp_enrichr_{data}_2_groups_{cl}.tsv'
payload = {
'list': (None, genes_str),
'description': (None, description)
}
response = requests.post(ENRICHR_URL, files=payload)
if not response.ok:
raise Exception('Error analyzing gene list')
job_id = json.loads(response.text)
################################################################################
# Get enrichment results
#
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/export'
query_string = '?userListId=%s&filename=%s&backgroundType=%s'
user_list_id = str(job_id['userListId'])
gene_set_library = str(enrichr_library)
url = ENRICHR_URL + query_string % (user_list_id, filename, gene_set_library)
response = requests.get(url, stream=True)
print(' Enrichr API : Downloading file of enrichment results: Job Id:', job_id)
with open(filename, 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
print(f' Saved to {filename}')
cluster_dfs[cl] = pd.read_csv(filename, sep='\t')
return cluster_dfs
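# Hedged usage sketch (illustration only): running Enrichr on previously saved
# community data for one sample. 'C51' is a hypothetical sample id, and the
# hard-coded cluster paths above must exist for this call to succeed.
#
#   cluster_dfs = run_enrichr('C51', is_communities=True, data_type='all',
#                             top_n=50, enrichr_library='MSigDB_Hallmark_2020')
#   print(cluster_dfs['cluster_0'].head())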
def betweenness_centrality_parallel(G, processes=None):
"""Parallel betweenness centrality function"""
from multiprocessing import Pool
def chunks(l, n):
"""Divide a list of nodes `l` in `n` chunks"""
l_c = iter(l)
while True:
x = tuple(itertools.islice(l_c, n))
if not x:
return
yield x
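# e.g. list(chunks(range(10), 4)) -> [(0, 1, 2, 3), (4, 5, 6, 7), (8, 9)]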
p = Pool(processes=processes)
node_divisor = len(p._pool) * 4
node_chunks = list(chunks(G.nodes(), max(G.order() // node_divisor, 1)))  # chunk size of at least 1
num_chunks = len(node_chunks)
bt_sc = p.starmap(
nx.betweenness_centrality_subset,
zip(
[G] * num_chunks,
node_chunks,
[list(G)] * num_chunks,
[True] * num_chunks,
['distance'] * num_chunks
),
)
p.close()
p.join()
# Reduce the partial solutions
bt_c = bt_sc[0]
for bt in bt_sc[1:]:
for n in bt:
bt_c[n] += bt[n]
return bt_c
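# ------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). Runs the parallel betweenness helper on a small random graph.
# Assumes `networkx` is imported as `nx` at the top of this module; the
# graph size, edge probability, and process count are arbitrary choices.
if __name__ == '__main__':
    demo_G = nx.gnp_random_graph(200, 0.05, seed=42, directed=True)
    # The demo edges carry no 'distance' attribute, so NetworkX falls
    # back to a weight of 1 for every edge
    demo_bc = betweenness_centrality_parallel(demo_G, processes=2)
    print(sorted(demo_bc.items(), key=lambda kv: kv[1], reverse=True)[:5])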
|
masyahook/Single-cell-gene-regulatory-networks
|
scGRN/func.py
|
func.py
|
py
| 43,101 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "wordcloud.STOPWORDS.union",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "wordcloud.STOPWORDS",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "functools.reduce",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "networkx.spring_layout",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "networkx.DiGraph",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "networkx.spring_layout",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "networkx.spring_layout",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "numpy.ogrid",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "networkx.betweenness_centrality",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "networkx.closeness_centrality",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "tqdm.notebook.tqdm",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "networkx.betweenness_centrality",
"line_number": 319,
"usage_type": "attribute"
},
{
"api_name": "networkx.closeness_centrality",
"line_number": 319,
"usage_type": "attribute"
},
{
"api_name": "tqdm.notebook.tqdm",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 381,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 388,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 389,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 407,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 409,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 410,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 417,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 420,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 421,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 426,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 428,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 429,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 432,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 435,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 437,
"usage_type": "attribute"
},
{
"api_name": "termcolor.colored",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "community.community_louvain.best_partition",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "community.community_louvain",
"line_number": 453,
"usage_type": "name"
},
{
"api_name": "igraph.Graph.from_networkx",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "igraph.Graph",
"line_number": 455,
"usage_type": "attribute"
},
{
"api_name": "leidenalg.find_partition",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "leidenalg.ModularityVertexPartition",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "termcolor.colored",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "networkx.betweenness_centrality",
"line_number": 492,
"usage_type": "attribute"
},
{
"api_name": "networkx.closeness_centrality",
"line_number": 492,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 496,
"usage_type": "call"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 530,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.ListedColormap",
"line_number": 557,
"usage_type": "call"
},
{
"api_name": "seaborn.color_palette",
"line_number": 557,
"usage_type": "call"
},
{
"api_name": "colorcet.glasbey_bw",
"line_number": 557,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 565,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 565,
"usage_type": "name"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 569,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 576,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 582,
"usage_type": "call"
},
{
"api_name": "networkx.draw",
"line_number": 595,
"usage_type": "call"
},
{
"api_name": "networkx.draw_networkx_nodes",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 607,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 607,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 609,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 609,
"usage_type": "name"
},
{
"api_name": "pandas.Series",
"line_number": 625,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 715,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 793,
"usage_type": "call"
},
{
"api_name": "joblib.Parallel",
"line_number": 796,
"usage_type": "call"
},
{
"api_name": "joblib.delayed",
"line_number": 796,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 797,
"usage_type": "call"
},
{
"api_name": "pandas.read_pickle",
"line_number": 840,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 840,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 840,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_pickle",
"line_number": 845,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 845,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 845,
"usage_type": "attribute"
},
{
"api_name": "pandas.concat",
"line_number": 850,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 851,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 865,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 866,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 876,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 885,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 891,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 922,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 927,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 938,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 948,
"usage_type": "call"
},
{
"api_name": "itertools.islice",
"line_number": 961,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 966,
"usage_type": "call"
},
{
"api_name": "networkx.betweenness_centrality_subset",
"line_number": 971,
"usage_type": "attribute"
}
] |
35299316629
|
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
from xml.dom import minidom
import untangle
def xml_generator(input_filename, input_foldername, exif_list, root_path):
root = ET.Element('annotation')
source = ET.SubElement(root, 'source')
image_date = ET.SubElement(source, 'date')
image_date.text = str(exif_list[0])
folder_name = ET.SubElement(source, 'folder')
folder_name.text = input_foldername
file_name = ET.SubElement(source, 'filename')
file_name.text = input_filename
gpsinfo = ET.SubElement(root, 'gpsinfo')
gps_altitude = ET.SubElement(gpsinfo, 'GPSAltitude')
gps_altitude.text = str(exif_list[1])
gps_latitude = ET.SubElement(gpsinfo, 'GPSLatitude')
gps_latitude.text = str(exif_list[2])
gps_latitude_ref = ET.SubElement(gpsinfo, 'GPSLatitudeRef')
gps_latitude_ref.text = str(exif_list[3])
gps_longitude = ET.SubElement(gpsinfo, 'GPSLongitude')
gps_longitude.text = str(exif_list[4])
gps_longitude_ref = ET.SubElement(gpsinfo, 'GPSLongitudeRef')
gps_longitude_ref.text = str(exif_list[5])
'''
There should be position annotation inside 'object' tag
'''
#ann_obj = ET.SubElement(root, 'object')
xml_string = ET.tostring(root)
tree = minidom.parseString(xml_string)
save_path = '%s/ob_%s/%s.xml' % (root_path, input_foldername, input_filename[:-4])
with open(save_path, 'wb') as f:
    f.write(tree.toprettyxml(encoding='utf-8'))
def xml_parsing(input_xml_file):
obj = untangle.parse(input_xml_file)
date_time = obj.annotation.source.date.cdata
GPSAltitude = obj.annotation.gpsinfo.GPSAltitude.cdata
GPSLatitude = obj.annotation.gpsinfo.GPSLatitude.cdata
GPSLatitudeRef = obj.annotation.gpsinfo.GPSLatitudeRef.cdata
GPSLongitude = obj.annotation.gpsinfo.GPSLongitude.cdata
GPSLongitudeRef = obj.annotation.gpsinfo.GPSLongitudeRef.cdata
xml_info_keys = ['Date', 'GPSAltitude', 'GPSLatitude', 'GPSLatitudeRef', 'GPSLongitude', 'GPSLongitudeRef']
xml_info_value = [date_time, GPSAltitude, GPSLatitude, GPSLatitudeRef, GPSLongitude, GPSLongitudeRef]
xml_info_dict = dict(zip(xml_info_keys, xml_info_value))
return xml_info_dict
#im = '/Users/xiang/ml_ann/ann_tools_eric/dataset/ob_curr/00001.xml'
#xml_parsing(im)
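# Hedged round-trip sketch (illustration only; the path and EXIF values are
# made up). Assumes the './dataset/ob_curr' folder exists before the call:
#   exif = ['2021:01:01 12:00:00', 10.0, 55.6761, 'N', 12.5683, 'E']
#   xml_generator('00001.jpg', 'curr', exif, './dataset')
#   info = xml_parsing('./dataset/ob_curr/00001.xml')
#   print(info['GPSLatitude'], info['GPSLatitudeRef'])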
|
simonchanper/ml_ann
|
ann_tools_eric/xml_process.py
|
xml_process.py
|
py
| 2,403 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.tostring",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "xml.dom.minidom.parseString",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "untangle.parse",
"line_number": 47,
"usage_type": "call"
}
] |
4516368952
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Xiaoy LI
# Description:
# train_bert_mrc.py
# Note that the data may contain multiple answers,
# so take extra care when loading it.
import os
import argparse
import numpy as np
import random
import torch
from data_loader.model_config import Config
from data_loader.mrc_data_loader import MRCNERDataLoader
from data_loader.mrc_data_processor import Conll03Processor, MSRAProcessor, Onto4ZhProcessor, Onto5EngProcessor, GeniaProcessor, ACE2004Processor, ACE2005Processor, ResumeZhProcessor
from layer.optim import AdamW, lr_linear_decay, BertAdam
from model.bert_mrc import BertQueryNER
from data_loader.bert_tokenizer import BertTokenizer4Tagger
from metric.mrc_ner_evaluate import flat_ner_performance, nested_ner_performance
def args_parser():
# start parser
parser = argparse.ArgumentParser()
# requires parameters
parser.add_argument("--config_path", default="/home/lixiaoya/", type=str)
parser.add_argument("--data_dir", default=None, type=str)
parser.add_argument("--bert_model", default=None, type=str,)
parser.add_argument("--task_name", default=None, type=str)
parser.add_argument("--max_seq_length", default=128, type=int)
parser.add_argument("--train_batch_size", default=32, type=int)
parser.add_argument("--dev_batch_size", default=32, type=int)
parser.add_argument("--test_batch_size", default=32, type=int)
parser.add_argument("--checkpoint", default=100, type=int)
parser.add_argument("--learning_rate", default=5e-5, type=float)
parser.add_argument("--num_train_epochs", default=5, type=int)
parser.add_argument("--warmup_proportion", default=0.1, type=float)
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument("--seed", type=int, default=3006)
parser.add_argument("--output_dir", type=str, default="/home/lixiaoya/output")
parser.add_argument("--data_sign", type=str, default="msra_ner")
parser.add_argument("--weight_start", type=float, default=1.0)
parser.add_argument("--weight_end", type=float, default=1.0)
parser.add_argument("--weight_span", type=float, default=1.0)
parser.add_argument("--entity_sign", type=str, default="flat")
parser.add_argument("--n_gpu", type=int, default=1)
parser.add_argument("--dropout", type=float, default=0.2)
parser.add_argument("--entity_threshold", type=float, default=0.5)
parser.add_argument("--num_data_processor", default=1, type=int, help="number of data processor.")
parser.add_argument("--data_cache", default=True, action='store_false')
parser.add_argument("--export_model", default=True, action='store_false')
parser.add_argument("--do_lower_case", default=False, action='store_true', help="lower case of input tokens.")
parser.add_argument('--fp16', default=False, action='store_true', help="Whether to use mixed 16-bit float precision instead of 32-bit")
parser.add_argument("--amp_level", default="O2", type=str, help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', 'O3']. See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
args = parser.parse_args()
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
return args
def load_data(config):
print("-*-"*10)
print("current data_sign: {}".format(config.data_sign))
if config.data_sign == "conll03":
data_processor = Conll03Processor()
elif config.data_sign == "zh_msra":
data_processor = MSRAProcessor()
elif config.data_sign == "zh_onto":
data_processor = Onto4ZhProcessor()
elif config.data_sign == "en_onto":
data_processor = Onto5EngProcessor()
elif config.data_sign == "genia":
data_processor = GeniaProcessor()
elif config.data_sign == "ace2004":
data_processor = ACE2004Processor()
elif config.data_sign == "ace2005":
data_processor = ACE2005Processor()
elif config.data_sign == "resume":
data_processor = ResumeZhProcessor()
else:
raise ValueError(f"Unknown data_sign: {config.data_sign}")
label_list = data_processor.get_labels()
tokenizer = BertTokenizer4Tagger.from_pretrained(config.bert_model, do_lower_case=config.do_lower_case)
dataset_loaders = MRCNERDataLoader(config, data_processor, label_list, tokenizer, mode="train", allow_impossible=True)
train_dataloader = dataset_loaders.get_dataloader(data_sign="train", num_data_processor=config.num_data_processor)
dev_dataloader = dataset_loaders.get_dataloader(data_sign="dev", num_data_processor=config.num_data_processor)
test_dataloader = dataset_loaders.get_dataloader(data_sign="test", num_data_processor=config.num_data_processor)
num_train_steps = dataset_loaders.get_num_train_epochs()
return train_dataloader, dev_dataloader, test_dataloader, num_train_steps, label_list
def load_model(config, num_train_steps, label_list):
device = torch.device("cuda")
n_gpu = config.n_gpu
model = BertQueryNER(config, )
model.to(device)
if config.n_gpu > 1:
model = torch.nn.DataParallel(model)
# prepare optimzier
param_optimizer = list(model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight", 'gamma', 'beta']
optimizer_grouped_parameters = [
{"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], "weight_decay": 0.01},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=config.learning_rate, betas=(0.9, 0.98), eps=1e-6, weight_decay=0.01)
# optimizer = BertAdam(optimizer_grouped_parameters, lr=config.learning_rate, warmup=config.warmup_proportion,
# t_total=num_train_steps, max_grad_norm=config.clip_grad)
scheduler = None
if config.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=config.amp_level)
# Distributed training (should be after apex fp16 initialization)
if config.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[config.local_rank], output_device=config.local_rank, find_unused_parameters=True
)
return model, optimizer, scheduler, device, n_gpu
def train(model, optimizer, scheduler, train_dataloader, dev_dataloader, test_dataloader, config, \
device, n_gpu, label_list):
dev_best_acc = 0
dev_best_precision = 0
dev_best_recall = 0
dev_best_f1 = 0
dev_best_loss = float("inf")
test_acc_when_dev_best = 0
test_pre_when_dev_best = 0
test_rec_when_dev_best = 0
test_f1_when_dev_best = 0
test_loss_when_dev_best = float("inf")
model.train()
for idx in range(int(config.num_train_epochs)):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
print("#######"*10)
print("EPOCH: ", str(idx))
if idx != 0:
lr_linear_decay(optimizer)
for step, batch in enumerate(train_dataloader):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, start_pos, end_pos, span_pos, span_label_mask, ner_cate = batch
loss = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, \
start_positions=start_pos, end_positions=end_pos, span_positions=span_pos, span_label_mask=span_label_mask)
if config.n_gpu > 1:
loss = loss.mean()
if config.fp16:
from apex import amp
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_grad_norm)
optimizer.step()
model.zero_grad()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if nb_tr_steps % config.checkpoint == 0:
print("-*-"*15)
print("current training loss is : ")
print(loss.item())
model, tmp_dev_loss, tmp_dev_acc, tmp_dev_prec, tmp_dev_rec, tmp_dev_f1 = eval_checkpoint(model, dev_dataloader, config, device, n_gpu, label_list, eval_sign="dev")
print("......"*10)
print("DEV: loss, acc, precision, recall, f1")
print(tmp_dev_loss, tmp_dev_acc, tmp_dev_prec, tmp_dev_rec, tmp_dev_f1)
if tmp_dev_f1 > dev_best_f1:
dev_best_acc = tmp_dev_acc
dev_best_loss = tmp_dev_loss
dev_best_precision = tmp_dev_prec
dev_best_recall = tmp_dev_rec
dev_best_f1 = tmp_dev_f1
# export model
if config.export_model:
model_to_save = model.module if hasattr(model, "module") else model
output_model_file = os.path.join(config.output_dir, "bert_finetune_model_{}_{}.bin".format(str(idx),str(nb_tr_steps)))
torch.save(model_to_save.state_dict(), output_model_file)
print("SAVED model path is :")
print(output_model_file)
model = model.cuda().to(device)
model, tmp_test_loss, tmp_test_acc, tmp_test_prec, tmp_test_rec, tmp_test_f1 = eval_checkpoint(model, test_dataloader, config, device, n_gpu, label_list, eval_sign="test")
print("......"*10)
print("TEST: loss, acc, precision, recall, f1")
print(tmp_test_loss, tmp_test_acc, tmp_test_prec, tmp_test_rec, tmp_test_f1)
test_acc_when_dev_best = tmp_test_acc
test_pre_when_dev_best = tmp_test_prec
test_rec_when_dev_best = tmp_test_rec
test_f1_when_dev_best = tmp_test_f1
test_loss_when_dev_best = tmp_test_loss
model = model.cuda().to(device)
print("-*-"*15)
print("=&="*15)
print("Best DEV : overall best loss, acc, precision, recall, f1 ")
print(dev_best_loss, dev_best_acc, dev_best_precision, dev_best_recall, dev_best_f1)
print("scores on TEST when Best DEV:loss, acc, precision, recall, f1 ")
print(test_loss_when_dev_best, test_acc_when_dev_best, test_pre_when_dev_best, test_rec_when_dev_best, test_f1_when_dev_best)
print("=&="*15)
def eval_checkpoint(model_object, eval_dataloader, config, device, n_gpu, label_list, eval_sign="dev"):
# input_dataloader type can only be one of dev_dataloader, test_dataloader
eval_loss = 0
start_pred_lst = []
end_pred_lst = []
span_pred_lst = []
start_scores_lst = []
end_scores_lst = []
mask_lst = []
start_gold_lst = []
span_gold_lst = []
end_gold_lst = []
eval_steps = 0
ner_cate_lst = []
for input_ids, input_mask, segment_ids, start_pos, end_pos, span_pos, span_label_mask, ner_cate in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
start_pos = start_pos.to(device)
end_pos = end_pos.to(device)
span_pos = span_pos.to(device)
span_label_mask = span_label_mask.to(device)
with torch.no_grad():
model_object.eval()
tmp_eval_loss = model_object(input_ids, segment_ids, input_mask, start_pos, end_pos, span_pos, span_label_mask)
start_labels, end_labels, span_scores = model_object(input_ids, segment_ids, input_mask)
start_pos = start_pos.to("cpu").numpy().tolist()
end_pos = end_pos.to("cpu").numpy().tolist()
span_pos = span_pos.to("cpu").numpy().tolist()
start_label = start_labels.detach().cpu().numpy().tolist()
end_label = end_labels.detach().cpu().numpy().tolist()
span_scores = span_scores.detach().cpu().numpy().tolist()
span_label = span_scores
input_mask = input_mask.to("cpu").detach().numpy().tolist()
ner_cate_lst += ner_cate.numpy().tolist()
eval_loss += tmp_eval_loss.mean().item()
mask_lst += input_mask
eval_steps += 1
start_pred_lst += start_label
end_pred_lst += end_label
span_pred_lst += span_label
start_gold_lst += start_pos
end_gold_lst += end_pos
span_gold_lst += span_pos
if config.entity_sign == "flat":
eval_accuracy, eval_precision, eval_recall, eval_f1 = flat_ner_performance(start_pred_lst, end_pred_lst, span_pred_lst, start_gold_lst, end_gold_lst, span_gold_lst, ner_cate_lst, label_list, threshold=config.entity_threshold, dims=2)
else:
eval_accuracy, eval_precision, eval_recall, eval_f1 = nested_ner_performance(start_pred_lst, end_pred_lst, span_pred_lst, start_gold_lst, end_gold_lst, span_gold_lst, ner_cate_lst, label_list, threshold=config.entity_threshold, dims=2)
average_loss = round(eval_loss / eval_steps, 4)
eval_f1 = round(eval_f1, 4)
eval_precision = round(eval_precision, 4)
eval_recall = round(eval_recall, 4)
eval_accuracy = round(eval_accuracy, 4)
model_object.train()
return model_object, average_loss, eval_accuracy, eval_precision, eval_recall, eval_f1
def merge_config(args_config):
model_config_path = args_config.config_path
model_config = Config.from_json_file(model_config_path)
model_config.update_args(args_config)
model_config.print_config()
return model_config
def main():
args_config = args_parser()
config = merge_config(args_config)
train_loader, dev_loader, test_loader, num_train_steps, label_list = load_data(config)
model, optimizer, scheduler, device, n_gpu = load_model(config, num_train_steps, label_list)
train(model, optimizer, scheduler, train_loader, dev_loader, test_loader, config, device, n_gpu, label_list)
if __name__ == "__main__":
main()
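# Hedged launch example (illustration only; every path below is a placeholder):
#   python train_bert_mrc.py \
#       --config_path /path/to/model_config.json \
#       --data_dir /path/to/mrc_ner_data \
#       --bert_model /path/to/bert-base-cased \
#       --data_sign conll03 --entity_sign flat \
#       --output_dir /path/to/output --num_train_epochs 5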
|
wusongxu/mrc-for-flat-nested-ner
|
run/train_bert_mrc.py
|
train_bert_mrc.py
|
py
| 14,951 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed_all",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "data_loader.mrc_data_processor.Conll03Processor",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "data_loader.mrc_data_processor.MSRAProcessor",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "data_loader.mrc_data_processor.Onto4ZhProcessor",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "data_loader.mrc_data_processor.Onto5EngProcessor",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "data_loader.mrc_data_processor.GeniaProcessor",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "data_loader.mrc_data_processor.ACE2004Processor",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "data_loader.mrc_data_processor.ACE2005Processor",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "data_loader.mrc_data_processor.ResumeZhProcessor",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "data_loader.bert_tokenizer.BertTokenizer4Tagger.from_pretrained",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "data_loader.bert_tokenizer.BertTokenizer4Tagger",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "data_loader.mrc_data_loader.MRCNERDataLoader",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "model.bert_mrc",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "model.bert_mrc.BertQueryNER",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "model.bert_mrc.to",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "model.bert_mrc",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "model.bert_mrc",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "model.bert_mrc.named_parameters",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "model.bert_mrc",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "layer.optim.AdamW",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "model.bert_mrc",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "apex.amp.initialize",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "apex.amp",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "model.bert_mrc",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "torch.nn.parallel.DistributedDataParallel",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "model.bert_mrc",
"line_number": 151,
"usage_type": "argument"
},
{
"api_name": "torch.nn",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "model.bert_mrc",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "model.bert_mrc.train",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "model.bert_mrc",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "layer.optim.lr_linear_decay",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "model.bert_mrc",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "apex.amp.scale_loss",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "apex.amp",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "apex.amp.master_params",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "apex.amp",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "model.bert_mrc.parameters",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "model.bert_mrc",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "model.bert_mrc.zero_grad",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "model.bert_mrc",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "model.bert_mrc",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "model.bert_mrc",
"line_number": 224,
"usage_type": "argument"
},
{
"api_name": "model.bert_mrc.module",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "model.bert_mrc",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "model.bert_mrc.cuda",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "model.bert_mrc",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "model.bert_mrc",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "model.bert_mrc.cuda",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "metric.mrc_ner_evaluate.flat_ner_performance",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "metric.mrc_ner_evaluate.nested_ner_performance",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "data_loader.model_config.Config.from_json_file",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "data_loader.model_config.Config",
"line_number": 324,
"usage_type": "name"
},
{
"api_name": "model.bert_mrc",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "model.bert_mrc",
"line_number": 335,
"usage_type": "argument"
}
] |
5759183851
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@AUTHOR:Joselyn Zhao
@CONTACT:[email protected]
@HOME_PAGE:joselynzhao.top
@SOFTWERE:PyCharm
@FILE:main.py
@TIME:2019/6/13 10:32
@DES:
'''
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)
from tensorflow.examples.tutorials.mnist import input_data
from Lenet import *
from PIL import Image
mnist = input_data.read_data_sets('../../../data/mnist', one_hot=True)
x_test = np.reshape(mnist.test.images, [-1, 28, 28, 1])
x_test = np.pad(x_test, ((0, 0), (2, 2), (2, 2), (0, 0)),
'constant') # print("Updated Image Shape: {}".format(X_train[0].shape))
tf.logging.set_verbosity(old_v)
iterations = 1000
batch_size = 64
ma = 0
sigma = 0.1
lr = 0.01
def get_sample100(label):
sample100_x=[]
sample100_y=[]
count = 0
for i in range(len(mnist.test.images)):
if mnist.test.labels[i][label]==1:
count+=1
sample100_y.append(mnist.test.labels[i])
sample100_x.append(mnist.test.images[i])
if count>=100:
break
return sample100_x,sample100_y
def train_lenet(lenet):
with tf.Session() as sess:  # does this session need closing? (the with-block closes it automatically)
sess.run(tf.global_variables_initializer())
tf.summary.image("input",lenet.x,3)
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter("LOGDIR/4/", sess.graph)  # save logs under a different path
# writer.add_graph(sess.graph)
for ii in range(iterations):
batch_xs,batch_ys = mnist.train.next_batch(batch_size)
batch_xs = np.reshape(batch_xs,[-1,28,28,1])
batch_xs = np.pad(batch_xs,((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
sess.run(lenet.train_step,feed_dict ={lenet.x:batch_xs,lenet.y_:batch_ys})
if ii % 50 == 1:
acc,s = sess.run([lenet.accuracy,merged_summary],feed_dict ={lenet.x:x_test,lenet.y_:mnist.test.labels})
writer.add_summary(s,ii)
print("%5d: accuracy is: %4f" % (ii, acc))
sample100_x, sample100_y = get_sample100(4)  # label 4 picked arbitrarily; any value 0-9 works
sample100_x = np.reshape(sample100_x,[-1,28,28,1])
sample100_x = np.pad(sample100_x, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
x_min = tf.reduce_min(lenet.fc2)
x_max = tf.reduce_max(lenet.fc2)
fc2 = (lenet.fc2 - x_min) / (x_max - x_min)
fc2 = sess.run(fc2,feed_dict={lenet.x:sample100_x,lenet.y_:sample100_y})
plt.imshow(fc2)
plt.show()
print('[accuracy]:', sess.run([lenet.accuracy], feed_dict={lenet.x:x_test,lenet.y_:mnist.test.labels}))
if __name__ == "__main__":
act = "sigmoid"
lenet = Lenet(ma,sigma,lr,act)
train_lenet(lenet)
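# To browse the summaries written by train_lenet (illustration only):
#   tensorboard --logdir LOGDIR/4/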
|
joselynzhao/DeepLearning.Advanceing
|
DL_6/work/main.py
|
main.py
|
py
| 2,860 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "tensorflow.logging.get_verbosity",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tensorflow.logging",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.logging.set_verbosity",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tensorflow.logging",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tensorflow.examples.tutorials.mnist.input_data",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "numpy.reshape",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.pad",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tensorflow.logging.set_verbosity",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tensorflow.logging",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Session",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.image",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.merge_all",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.FileWriter",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "numpy.reshape",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.pad",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.pad",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_min",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_max",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
}
] |
36670049284
|
import matplotlib.pyplot as plt
# from mpl_toolkits.axes_grid1 import ImageGrid
# import numpy as np
from os import listdir
from os import chdir
from os import path
from PIL import Image
# import matplotlib.gridspec as gridspec
import argparse
parser = argparse.ArgumentParser(description="generate plot for report")
parser.add_argument("--input_dir", required=True, help="Input ROS bag.")
parser.add_argument("--rows", required=True, help="numer of rows in figure")
parser.add_argument("--cols", required=True, help="number of columns in figure")
args = parser.parse_args()
# chdir('/Volumes/macOS Big Sur/Users/pmvanderburg/matplotlib_test/')
chdir(args.input_dir)
files = listdir(args.input_dir)
files.sort()
files = [f for f in files if f != '.DS_Store']  # don't mutate the list while iterating over it
for i, f in enumerate(files):
    print(i, f)
images = [Image.open(f) for f in files]
print(len(images))
max_rows = args.rows  # previously hard-coded to 7
max_cols = args.cols  # previously hard-coded to 3
methods=['Input image',
'640x480 N+FT',
'832x256 K+FT',
'640x480 N',
'832x256 N',
'640x480 K',
'832x256 K']
fig, axes = plt.subplots(nrows=max_rows, ncols=max_cols, figsize=(9,10), sharex=True, sharey=True)
for idx, image in enumerate(images):
# print(files[idx])
print(idx)
row = idx % max_rows
col = idx // max_rows
print(row,' row')
print(col,' col')
# if col>0:
# axes[row, col].axis("off")
axes[row,col].spines['bottom'].set_color('#ffffff')
axes[row,col].spines['top'].set_color('#ffffff')
axes[row,col].spines['right'].set_color('#ffffff')
axes[row,col].spines['left'].set_color('#ffffff')
if image.size==(1280, 720):
image = image.resize((640,480))
axes[row, col].imshow(image, cmap="gray", aspect="auto")
axes[row, 0].set_ylabel(methods[row])
plt.subplots_adjust(wspace=.05, hspace=.05)
plt.xticks([])
plt.yticks([])
# fig.savefig(path.join)
plt.show()
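# Hedged invocation example (illustration only; the directory is a placeholder):
#   python plot_results.py --input_dir ./comparison_images --rows 7 --cols 3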
|
ThijsvdBurg/Husky_scripts
|
data_visualization/plot scripts/plot_results.py
|
plot_results.py
|
py
| 1,911 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
}
] |
32044432835
|
import re
import io
keywords = ["int", "void", "main", "print"]
operators = { '=': 'Assignment Operator','+': 'Additon Operator', '-' : 'Substraction Operator', '/' : 'Division Operator', '*': 'Multiplication Operator'}
optr_keys = operators.keys()
symbols = {';':'semi_colon','{' : 'left_brace', '}':'right_brace', '(':'left_parenthesis',')':'right_parenthesis' ,'[':'left_sqbracket',']':'right_sqbracket'}
symbol_keys = symbols.keys()
the_ch = " "
the_line = 1
token_list = []
error = []
#get the next character from the input file
def next_ch():
global the_ch, the_line
the_ch = input_file.read(1)
if the_ch == '\n':
the_line += 1
return the_ch
#handle identifiers and numbers
def identifier_or_number(line_no):
text = ""
while the_ch.isalnum() or the_ch == '_' or the_ch =='.':
text += the_ch
next_ch()
if len(text) == 0:
error_msg = "Unrecognized character "+the_ch+" found in line : "+str(line_no)
error.append(error_msg)
next_ch()
return '' , '' , ''
elif text in keywords:
token_list.append(text)
return line_no, text, "Keyword"
elif text in re.findall('[_a-zA-Z][_a-zA-Z0-9]*',text):
token_list.append('id')
return line_no , text , 'Identifier'
elif text in re.findall('[0-9]+[.]?[0-9]*',text):
token_list.append('num')
return line_no , text , 'Number'
elif text not in re.findall('[_a-zA-Z ][_a-zA-Z0-9 ]*',text):
error_msg=text+" is an invalid identifier found in line : "+str(line_no)
error.append(error_msg)
return '','',''
#return the next token type
def getToken():
while the_ch.isspace():
next_ch()
line_no = the_line
if len(the_ch) == 0:
token_list.append('$')
        return line_no, '$', 'End_of_input'
elif the_ch in symbol_keys:
token = the_ch
token_list.append(token)
sym = symbols[token]
next_ch()
return line_no, token , sym
elif the_ch in optr_keys:
token = the_ch
token_list.append(token)
opr = operators[token]
next_ch()
return line_no, token , opr
else:
return identifier_or_number(line_no)
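# Hypothetical sample input.txt (illustrative, not shipped with the repo):
#   int main ( ) { int x = 5 ; print ( x ) ; }
# getToken() would then emit e.g. ('int', Keyword), ('main', Keyword),
# ('(', left_parenthesis), ('x', Identifier), ('5', Number), (';', semi_colon), ...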
#opening input file
f = open("input.txt", "r")
i = f.read()
program = re.sub('//.*?\n|/\*.*?\*/', '', i, flags=re.S) #removes all comment lines from input file
input_file = io.StringIO(program) #converting string to file object
print("\nOutput of Lexical Analyser\n--------------------------\n")
print("%5s %7s %9s" % ("Line No.", "Token","Meaning"))
print("----------------------------")
while True:
t = getToken()
line = t[0]
token = t[1]
meaning = t[2]
if token != '' and token != '$':
print("%5s %9s %-15s" % (line, token, meaning))
elif token == '$':
break
#display error msg if found any
if len(error) != 0:
print("\n\n-------------",len(error),"ERROR FOUND ---------------\n")
for msg in error:
print(msg)
print("\n-------------------------------------------")
exit(0)
print("\nThere are total",line,"lines in program")
print("Tokens:",token_list)
|
Ashwintlp/CD-Mini-Project
|
lexical_analyser.py
|
lexical_analyser.py
|
py
| 3,333 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "re.findall",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "io.StringIO",
"line_number": 95,
"usage_type": "call"
}
] |
35986807996
|
import datetime
import os
# From https://www.quora.com/Whats-the-best-spaced-repetition-schedule. Not really scientific, but it seems decent.
schedule = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
if __name__ == '__main__':
today = datetime.date.today()
schedule_days = [today - datetime.timedelta(days=i) for i in schedule]
schedule_days = [i.strftime("%m/%d") for i in schedule_days]
output_string = ", ".join(schedule_days)
# God, applescript is literally the worst :'(. Why does this exist? Who thought this was a good idea?
# Also, you'll have to change this for a different OS, but this should work on pretty much all OSX versions
os.system("osascript -e 'display notification \"%s\" with title \"Spaced Repetition Reminder\"\'" % output_string)
|
cgrebosky/SpacedRepetitionReminder
|
main.py
|
main.py
|
py
| 779 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.date.today",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 16,
"usage_type": "call"
}
] |
24044811304
|
# compare parameters between ABC-SMC runs
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
from scipy import stats
from matplotlib.colors import LogNorm, Normalize
from scipy.signal import argrelextrema
filename=["ACDC_X2","ACDC_Y2","ACDC_Z2"]#,"ACDC_all"]
#filename=['ACDC_X2']
filename=['ACDC_X2','ACDC_X21ind']
n=['final']
#n=['1','2','3','4','5','6','7','8','9','10','11','12','final']#'13','14','15','final']
#n=['1','2','3','4','5','6','7','8','9','10','11','12','13','final']#,'12','13','14','final']#,'15']#,'final']
path='C:/Users/Administrator/Desktop/Modeling/AC-DC/'
path='/users/ibarbier/AC-DC/'
sys.path.insert(0, path + filename[0])
import model_equation as meq
parlist=meq.parlist
namelist=[]
for i,par in enumerate(parlist):
namelist.append(parlist[i]['name'])
par0 = {
'K_ARAX':-3.5,#0.01,
'n_ARAX':2,
'K_XY':-2.5,
'n_XY':2,
'K_XZ':-1.55,#-1.25
'n_XZ':2,
'beta_X':1,
'alpha_X':0,
'delta_X':1,
'K_ARAY':-3.5,
'n_ARAY':2,
'K_YZ':-3.5,
'n_YZ':2,
'beta_Y':1,
'alpha_Y':0,
'delta_Y':1,
'K_ZX':-2.5,
'n_ZX':2,
'beta_Z':1,
'alpha_Z':0,
'delta_Z':1,
'beta/alpha_X':2,
'beta/alpha_Y':2,
'beta/alpha_Z':2
}
def pars_to_dict(pars,parlist):
### This function is not necessary, but it makes the code a bit easier to read,
### it transforms an array of pars e.g. p[0],p[1],p[2] into a
### named dictionary e.g. p['k0'],p['B'],p['n'],p['x0']
### so it is easier to follow the parameters in the code
dict_pars = {}
for ipar,par in enumerate(parlist):
dict_pars[par['name']] = pars[ipar]
return dict_pars
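# Usage sketch (values illustrative; assumes parlist's leading entries are named as in par0 above):
#   p = pars_to_dict([-3.5, 2.0, -2.5], parlist[:3])
#   p['K_ARAX']  # -> -3.5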
def load(number= n,filename=filename,parlist=parlist):
namelist=[]
for i,par in enumerate(parlist):
namelist.append(parlist[i]['name'])
number=str(number)
filepath = path+filename+'/smc/pars_' + number + '.out'
dist_path = path+filename+'/smc/distances_' + number + '.out'
raw_output= np.loadtxt(filepath)
dist_output= np.loadtxt(dist_path)
df = pd.DataFrame(raw_output, columns = namelist)
df['dist']=dist_output
df=df.sort_values('dist',ascending=False)
distlist= sorted(df['dist'])
p=[]
for dist in distlist:
p_0=df[df['dist']==dist]
p0=[]
for n in namelist:
p0.append(p_0[n].tolist()[0])
p0=pars_to_dict(p0,parlist)
p.append(p0)
return p, df
def get_stats(filename,namelist):
stats_df = pd.DataFrame( columns = ['par','file','mean','sd','mode'])
parl = np.append(namelist,'dist')
# for fi,fnm in enumerate(filename):
fnm=filename[0]
p,df= load(n[0],fnm,parlist)
mean=np.mean(df).tolist()
sd=np.std(df).tolist()
mode=stats.mode(df)[0][0]
new_row={'par':parl,'file':[fnm]*len(parl),'mean':mean,'sd':sd,'mode':mode}
df2=pd.DataFrame(new_row)
stats_df =stats_df.append(df2)
return stats_df
def bar_plot(filename,namelist, t="mean"):
    stats_df=get_stats(filename,namelist)
    parl = np.append(namelist, 'dist')  # was undefined here; mirror get_stats
    # set width of bars
    barWidth = 0.20
    # Set position of bar on X axis
    r1 = np.arange(len(parl))
    #mean
    if t=="mean":
        for i,nm in enumerate(filename):
            v=stats_df[stats_df['file']==nm]  # the column is named 'file', not 'method'
            plt.bar((r1+barWidth*i),v['mean'],yerr=v['sd'], capsize=2,width=barWidth, label=nm)
        plt.xlabel('par', fontweight='bold')
        plt.xticks([r + barWidth for r in range(len(parl))], parl)
        plt.legend()
        plt.show()
    #mode
    if t == "mode":
        for i,nm in enumerate(filename):
            v=stats_df[stats_df['file']==nm]
            plt.bar((r1+barWidth*i),v['mode'],width=barWidth, label=nm)
        plt.xlabel('par', fontweight='bold')
        plt.xticks([r + barWidth for r in range(len(parl))], parl)
        plt.legend()
        plt.show()
def plot_compare(n,filename,namelist):
parl = np.append(namelist,'dist')
index=1
size=round(np.sqrt(len(parl)))
for i,name in enumerate(parl):
plt.subplot(size,size,index)
plt.tight_layout()
for fi,fnm in enumerate(filename):
            p,df = load(n, fnm, parlist)  # load() expects the parameter list; 'namelist1' was undefined
sns.kdeplot(df[name],bw_adjust=.8,label=fnm)
#plt.ylim(0,1)
if i < (len(parl)-2):
plt.xlim((parlist[i]['lower_limit'],parlist[i]['upper_limit']))
index=index+1
if index==5:
plt.legend(bbox_to_anchor=(1.05, 1))
#sns.kdeplot(df['K_XZ'])
plt.savefig(str(filename)+str(n)+"_compareplot.pdf", bbox_inches='tight')
plt.show()
#plot_compare(n[0],filename,namelist)
def plot_alltime(filename,namelist):
parl = np.append(namelist,'dist')
index=1
for i,name in enumerate(parl):
plt.subplot(4,4,index)
plt.tight_layout()
for ni,nmbr in enumerate(n):
p,df= load(nmbr,filename[0],parlist)
sns.kdeplot(df[name],bw_adjust=.8,label=nmbr)
#plt.ylim(0,1)
if i < (len(parl)-2):
plt.xlim((parlist[i]['lower_limit'],parlist[i]['upper_limit']))
index=index+1
#if index==5:
plt.legend(bbox_to_anchor=(1.05, 1))
plt.show()
#plot_alltime(['ACDC_X2'],namelist)
def plotdistpar(filename,namelist):
index=1
for ni,nb in enumerate(n):
p,df= load(nb,filename[0],parlist)
for i,name in enumerate(namelist):
plt.subplot(len(n),len(namelist),index)
# plt.tight_layout()
plt.scatter(df['dist'],df[name],s=1)
mean=np.mean(df[name]).tolist()
mode=stats.mode(df[name])[0][0]
plt.plot([0,40],[mean,mean],'r',label="mean")
            plt.plot([0,40],[mode,mode],'g',label="mode")
plt.ylim((parlist[i]['lower_limit'],parlist[i]['upper_limit']))
plt.ylabel(name)
index=index+1
plt.legend(bbox_to_anchor=(1.05, 1))
plt.show()
'''
ARA=np.logspace(-4.5,-2.,10,base=10)
p,df= load(n[0],filename[0],parlist)
stdf=get_stats(filename,namelist)
pmean=pars_to_dict(stdf['mean'])
pmode=pars_to_dict(stdf['mode'])
for i,p in enumerate([p[0],pmean,pmode,p[999]]):
X,Y,Z=meq.model(ARA,p)
df_X=pd.DataFrame(X,columns=ARA)
df_Y=pd.DataFrame(Y,columns=ARA)
df_Z=pd.DataFrame(Z,columns=ARA)
plt.subplot(4,3,(1+3*i))
sns.heatmap(df_X, cmap="Reds")
plt.subplot(4,3,(2+3*i))
sns.heatmap(df_Y, cmap ='Blues')
plt.subplot(4,3,(3+3*i))
sns.heatmap(df_Z, cmap ='Greens')
plt.show()
X,Y,Z=meq.model(ARA,pmode)
plt.plot(X[:,0],label="DCoff")
plt.plot(X[:,3],label="AC1")
plt.plot(X[:,6],label="AC2")
plt.plot(X[:,9],label="DCon")
plt.plot([200,200],[0,1000],'--')
plt.legend(bbox_to_anchor=(1.05, 1))
plt.tight_layout()
plt.show()
'''
#####1indvs2ind
def plotdensity1vs2():
p2,df2= load('final','ACDC_X2',parlist)
parlist1=parlist.copy()
del parlist1[7:9]
p1,df1= load('final','ACDC_X21ind',parlist1)
namelist=[]
for i,par in enumerate(parlist1):
namelist.append(par['name'])
parl = np.append(namelist,'dist')
index=1
for i,name in enumerate(parl):
plt.subplot(4,4,index)
plt.tight_layout()
sns.kdeplot(df1[name],bw_adjust=.8,label='X_1ind')
sns.kdeplot(df2[name],bw_adjust=.8,label='X_2ind')
#plt.ylim(0,1)
if i < (len(parl)-2):
plt.xlim((parlist1[i]['lower_limit'],parlist1[i]['upper_limit']))
index=index+1
if index==5:
plt.legend(bbox_to_anchor=(1.05, 1))
#sns.kdeplot(df['K_XZ'])
plt.savefig("1vs2ind"+str(n[0])+"_compareplot.pdf", bbox_inches='tight')
#plt.show()
plotdensity1vs2()
def ind1vs2indmeanandmode():
p2,df2= load('final','ACDC_X',parlist)
df2=df2.drop(columns=['K_ARAY', 'n_ARAY'])
mean_df2=np.mean(df2)
sd_df2=np.std(df2)
mode_df2=stats.mode(df2)[0][0]
parlist1=parlist.copy()
del parlist1[7:9]
p1,df1= load('12','ACDC_1ind',parlist1)
mean_df1=np.mean(df1)
sd_df1=np.std(df1)
mode_df1=stats.mode(df1)[0][0]
namelist=[]
for i,par in enumerate(parlist1):
namelist.append(par['name'])
parl = np.append(namelist,'dist')
# set width of bars
barWidth = 0.30
# Set position of bar on X axis
r1 = np.arange(len(parl))
plt.bar((r1+barWidth*0),mean_df1,yerr=sd_df1, capsize=2,width=barWidth, label="1ind")
plt.bar((r1+barWidth*1),mean_df2,yerr=sd_df2, capsize=2,width=barWidth, label="2ind")
plt.xlabel('par', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(parl))], parl)
plt.legend()
plt.show()
plt.bar((r1+barWidth*0),mode_df1,width=barWidth, label="1ind")
plt.bar((r1+barWidth*1),mode_df2,width=barWidth, label="2ind")
plt.xlabel('par', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(parl))], parl)
plt.legend()
plt.show()
def calculateSS(ARA,parUsed):
    # sort steady states according to their stability
    # create stability arrays of shape: arabinose x steady-state slot x (X,Y,Z)
unstable=np.zeros((len(ARA),3,3))
stable=np.zeros((len(ARA),3,3))
oscillation=np.zeros((len(ARA),3,3))
unstable[:]=np.nan
stable[:]=np.nan
oscillation[:]=np.nan
for ai,a in enumerate(ARA):
ss=meq.findss(a,parUsed)
if len(ss) > 3:
print("error: more than 3 steadystates")
else:
            d = b = c = 0  # separate counters per stability type (using si instead would forbid an oscillation at the same level)
for si,s in enumerate(ss):
e=meq.stability(a,parUsed,[s])[0][0]
if all(e<0):
stable[ai][d]=s
d+=1
if any(e>0):
pos=e[e>0]
if len(pos)==2:
if pos[0]-pos[1] == 0:
oscillation[ai][b]=s
b+=1
else:
unstable[ai][c]=s
c+=1
else:
unstable[ai][c]=s
c+=1
return unstable,stable,oscillation
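# Usage sketch (shapes follow the construction above):
#   un, st, osc = calculateSS(np.logspace(-4.5, -2., 20, base=10), p0)
#   # each array has shape (len(ARA), 3, 3); NaN entries mark absent steady-state slots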
# choose parameter set
def bifurcation(parUsed=None):
p,df= load('final','ACDC_X2',parlist)
#parUsed=par0
if parUsed == None:
parUsed=p[0]
ARA=np.logspace(-4.5,-2.,20,base=10)
ss=meq.findss(ARA[0],parUsed)[0]
#print(ss)
init=[ss[0],ss[1],ss[2]]
X,Y,Z=meq.model(ARA,parUsed,totaltime=100,init=init)
df_X=pd.DataFrame(X[500:],columns=ARA)
sns.heatmap(df_X, cmap="Reds", norm=LogNorm())
plt.show()
    xss,yss,zss = calculateSScurve(ARA,parUsed)  # NOTE: calculateSScurve is not defined in this file
maxX=[]
minX=[]
maxY=[]
minY=[]
maxZ=[]
minZ=[]
# X,Y,Z=meq.model(ARA,parUsed,totaltime=400)
delta=10e-5
for i in np.arange(0,len(ARA)):
min_x=[np.nan,np.nan,np.nan]
max_x=[np.nan,np.nan,np.nan]
ss=meq.findss(ARA[i],parUsed)
for si,s in enumerate(ss):
init=[s[0]+delta,s[1]+delta,s[2]+delta]
X,Y,Z=meq.model(ARA,parUsed,totaltime=100,init=init)
# print(max(X[200:,i]))
max_x[si]=max(X[200:,i])
min_x[si]=min(X[200:,i])
maxX.append(max_x)
minX.append(min_x)
# minX.append(min(X[200:,i]))
maxY.append(max(Y[200:,i]))
minY.append(min(Y[200:,i]))
maxZ.append(max(Z[200:,i]))
minZ.append(min(Z[200:,i]))
plt.subplot(3,1,1)
plt.plot(ARA,xss,'--o')
plt.plot(ARA,maxX,'-b')
plt.plot(ARA,minX,'-g')
#plt.fill_between(ARA,maxX,minX,alpha=0.2,facecolor='red')
plt.yscale("log")
plt.xscale("log")
plt.subplot(3,1,2)
plt.plot(ARA,yss,'--b')
# plt.plot(ARA,maxY,'-b')
# plt.plot(ARA,minY,'-b')
# plt.fill_between(ARA,maxY,minY,alpha=0.2,facecolor='blue')
plt.yscale("log")
plt.xscale("log")
plt.subplot(3,1,3)
plt.plot(ARA,zss,'--g')
# plt.plot(ARA,maxZ,'-g')
# plt.plot(ARA,minZ,'-g')
# plt.fill_between(ARA,maxZ,minZ,alpha=0.2,facecolor='green')
plt.yscale("log")
plt.xscale("log")
plt.show()
def getlimitcycle(ARA,ssl,par,tt=500):
M=np.ones((len(ARA),3,3))*np.nan
m=np.ones((len(ARA),3,3))*np.nan
delta=10e-5
transient=500
for ai,a in enumerate(ARA):
ss=ssl[ai]
for si,s in enumerate(ss):
if any(np.isnan(s)) == False:
init=[s[0]+delta,s[1]+delta,s[2]+delta]
X,Y,Z=meq.model([a],par,totaltime=tt,init=init)
M[ai,si,0]=max(X[transient:])
M[ai,si,1]=max(Y[transient:])
M[ai,si,2]=max(Z[transient:])
m[ai,si,0]=min(X[transient:])
m[ai,si,1]=min(Y[transient:])
m[ai,si,2]=min(Z[transient:])
max_list=argrelextrema(X[transient:], np.greater)
maxValues=X[transient:][max_list]
min_list=argrelextrema(X[transient:], np.less)
minValues=X[transient:][min_list]
                maximaStability = abs((maxValues[-2]-minValues[-2]) - (maxValues[-3]-minValues[-3]))  # amplitude change between the last two cycles
if maximaStability > 0.01:
print("limit cycle not achieved for ARA["+str(ai)+"]:" + str(a) + " at st.s:"+ str(s))
return M,m
def bifurcation_plot(n,filename):
p,df= load(n,filename,parlist)
ARA=np.logspace(-4.5,-2.,200,base=10)
un,st,osc=calculateSS(ARA,p[1])
M,m=getlimitcycle(ARA,osc,p[1],tt=500)
for i,col in enumerate(['r','b','g']):
plt.subplot(3,1,i+1)
plt.plot(ARA,un[:,:,i],'--'+col)
plt.plot(ARA,st[:,:,i],'-'+col)
plt.plot(ARA,osc[:,:,i],'--'+col)
plt.fill_between(ARA,M[:,0,i],m[:,0,i],alpha=0.2,facecolor=col)
plt.fill_between(ARA,M[:,1,i],m[:,1,i],alpha=0.2,facecolor=col)
plt.fill_between(ARA,M[:,2,i],m[:,2,i],alpha=0.2,facecolor=col)
plt.yscale("log")
plt.xscale("log")
plt.show()
#bifurcation(p[1])
|
icvara/AC-DC
|
compareplot.py
|
compareplot.py
|
py
| 14,082 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.insert",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "model_equation.parlist",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.loadtxt",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "scipy.stats.mode",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "numpy.append",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "seaborn.kdeplot",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "numpy.append",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "seaborn.kdeplot",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "scipy.stats.mode",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 206,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "numpy.append",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "seaborn.kdeplot",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "seaborn.kdeplot",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "scipy.stats.mode",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "scipy.stats.mode",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "numpy.append",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 325,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "model_equation.findss",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "model_equation.stability",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "numpy.logspace",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "model_equation.findss",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "model_equation.model",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "seaborn.heatmap",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.LogNorm",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 368,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 383,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 384,
"usage_type": "attribute"
},
{
"api_name": "model_equation.findss",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "model_equation.model",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 407,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 408,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 409,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 410,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yscale",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xscale",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 413,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 415,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yscale",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 419,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xscale",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 420,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 421,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 422,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yscale",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 426,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xscale",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 427,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 428,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 432,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 433,
"usage_type": "attribute"
},
{
"api_name": "numpy.isnan",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "model_equation.model",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "scipy.signal.argrelextrema",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "numpy.greater",
"line_number": 449,
"usage_type": "attribute"
},
{
"api_name": "scipy.signal.argrelextrema",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "numpy.less",
"line_number": 451,
"usage_type": "attribute"
},
{
"api_name": "numpy.logspace",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 470,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 471,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 472,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 473,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.fill_between",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 474,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.fill_between",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 475,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.fill_between",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 476,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yscale",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 477,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xscale",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 478,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 479,
"usage_type": "name"
}
] |
10858272527
|
# coding: utf-8
# In[1]:
from pandas import DataFrame, read_csv
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import sys
from time import time
import numpy as np
# In[2]:
df = pd.read_csv('lyrics.csv')
df.head(10)
# In[3]:
df['lyrics'].replace('', np.nan, inplace=True)
df.dropna(subset=['lyrics'], inplace=True)
ind_drop = df[df['genre'].apply(lambda x: x.startswith('Other'))].index
df = df.drop(ind_drop)
# In[4]:
ind_drop = df[df['genre'].apply(lambda x: x.startswith('Not Available'))].index
df = df.drop(ind_drop)
# In[5]:
ind_drop = df[df['lyrics'].apply(lambda x: x.startswith('INSTRUMENTAL'))].index
df = df.drop(ind_drop)
df.drop(columns=['index'])
ind_drop = df[df['lyrics'].apply(lambda x: x.startswith('instrumental'))].index
df = df.drop(ind_drop)
df.drop(columns=['index'])
# In[6]:
genre=df['genre'].values
lyrics=df['lyrics'].values
true_k = len(np.unique(genre))
print(np.unique(genre), "The total number of genres are", true_k)
#shaping:
lyrics = np.array(lyrics)[:,None]
print(lyrics.shape)
genre = np.array(genre)[:,None]
print(genre.shape)
# In[7]:
data = np.append(lyrics,genre,axis=1)
data.shape
print(data)
# In[8]:
np.random.shuffle(data)
data_test = data[10001:20001,]
data = data[:10000,]
# In[9]:
data_lyrics=data[:,0]
data_genre=data[:,1]
data_lyrics_test = data_test[:,0]
data_genre_test = data_test[:,1]
# print(data_lyrics)
# print(data_genre.shape)
# In[10]:
vectorizer = TfidfVectorizer(
max_df=0.75, # max doc freq (as a fraction) of any word to include in the vocabulary
min_df=0.3, # min doc freq (as doc counts) of any word to include in the vocabulary
max_features=10000, # max number of words in the vocabulary
stop_words='english', # remove English stopwords
use_idf=True )
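# Hypothetical mini-example of the vectorizer's behaviour (toy documents, not the lyrics data):
#   toy = ["love me tender", "love hurts", "tender is the night"]
#   TfidfVectorizer(stop_words='english').fit_transform(toy)  # -> 3 x vocab sparse TF-IDF matrix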
# In[11]:
labels={'Country':1, 'Electronic':2, 'Folk':3, 'Hip-Hop':4, 'Indie':5, 'Jazz':6,
'Metal':7, 'Pop':8, 'R&B':9, 'Rock':10}
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
vectorizer.fit(data_lyrics)
X = vectorizer.transform(data_lyrics)
Y = [labels[i] for i in data_genre]
X_test = vectorizer.transform(data_lyrics_test)
Y_test = [labels[i] for i in data_genre_test]
n_features = X.shape[1]
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
# In[12]:
doc_ind = 1 # Index of an example document
xi = X[doc_ind,:].todense()
term_ind = xi.argsort()[:, ::-1]
xi_sort = xi[0,term_ind]
terms = vectorizer.get_feature_names()
for i in range(n_features):
term = terms[term_ind[0,i]]
tfidf = xi[0,term_ind[0,i]]
print('{0:20s} {1:f} '.format(term, tfidf))
# In[13]:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=True)
# In[14]:
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
# In[15]:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
# In[16]:
labels={'Country':1, 'Electronic':2, 'Folk':3, 'Hip-Hop':4, 'Indie':5, 'Jazz':6,
'Metal':7, 'Pop':8, 'R&B':9, 'Rock':10}
print(labels.values())  # call values(); bare .values prints the bound method
# genre_names
# data_genre
genre_labels=[]
#print(genre_labels.shape)
for j,i in enumerate(data_genre):
x=labels[i]
#print(x)
    # np.append(genre_labels, x) returned a new array that was discarded; list.append below suffices
genre_labels.append(x)
#print(genre_labels)
# In[17]:
# Note: k-means cluster ids are arbitrary, so this fraction only measures incidental
# alignment between cluster indices and the hand-assigned genre label ids.
print((Y_test == km.predict(X_test)).sum() / len(Y_test))
# In[18]:
labelkm = km.labels_
print(labelkm.shape)
print(type(labelkm))
# In[19]:
#print(data_genre)
labelkm = km.labels_
from sklearn.metrics import confusion_matrix
C = confusion_matrix(genre_labels,labelkm)
Csum = np.sum(C,axis=0)
Cnorm = C / Csum[None,:]
print(Cnorm)
print(np.array_str(C, precision=3, suppress_small=True))
plt.imshow(C, interpolation='none')
plt.colorbar()
|
TejaishwaryaGagadam/music_genre_predictor
|
K_Means_Clustering.py
|
K_Means_Clustering.py
|
py
| 4,472 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "numpy.unique",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.array_str",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 230,
"usage_type": "name"
}
] |
28713863068
|
import torch
import pandas as pd
import os
from shutil import copy
from utils import fix_randomness, save_to_df, _logger, report_results, get_nonexistant_path, copy_Files
from dataloader.dataloader import data_generator
from trainer.training_evaluation import cross_domain_test
from datetime import datetime
from itertools import product
from args import args
import wandb
start_time = datetime.now()
device = torch.device(args.device)
da_method = args.da_method
save_dir = args.save_dir
data_type = args.selected_dataset
data_path = f"./data/{data_type}"
base_model_type = args.base_model
experiment_description = args.experiment_description
if not os.path.exists(save_dir):
os.mkdir(save_dir)
exec(f'from trainer.{da_method} import cross_domain_train')
exec(f'from config_files.{data_type}_Configs import Config as Configs')
exec(f'from models.models import {base_model_type} as base_model')
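# For example (argument values are hypothetical): with da_method='SLARDA', data_type='HAR'
# and base_model='CNN', the exec calls above resolve to:
#   from trainer.SLARDA import cross_domain_train
#   from config_files.HAR_Configs import Config as Configs
#   from models.models import CNN as base_model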
configs = Configs()
# os.environ["WANDB_MODE"] = "dryrun"
os.environ["WANDB_SILENT"] = 'true'
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
# torch.backends.cudnn.enabled = False  # another workaround for the LSTM launch failure issue
def main_train_cd():
# find out the domains IDs
data_files = os.listdir(data_path)
data_files = [i for i in data_files if "train" in i]
sources = [i[6] for i in data_files]
src_tgt_product = [sources, sources]
simple_column_names = ['Run ID',
'source_loss', 'source_acc',
'target_loss', 'target_acc',]
column_names_mean = ['Scenario',
'Source_only_loss_mean', 'Source_only_acc_mean',
f'{da_method}_loss_mean', f'{da_method}_acc_mean',
f'Source_only_loss_std', 'Source_only_acc_std',
f'{da_method}_loss_std', f'{da_method}_acc_std']
simple_df= pd.DataFrame(columns=simple_column_names)
mean_df = pd.DataFrame(columns=column_names_mean)
# Logging
# cwd = os.getcwd()
# exp_log_dir = os.path.join(r"D:\Autoregressive Domain Adaptation for Time series data\Last",save_dir, experiment_description, f"{da_method}_{data_type}_{args.run_description}")
exp_log_dir = os.path.join(os.getcwd(),save_dir, experiment_description, f"{da_method}_{data_type}_{args.run_description}")
exp_log_dir = get_nonexistant_path(exp_log_dir)
# os.makedirs(exp_log_dir, exist_ok=True)
# copy(f"/home/mohamed/SLARADA/config_files/{data_type}_configs.py", f"{exp_log_dir}/{data_type}_configs.py")
# copy(f"/home/mohamed/SLARADA/trainer/{da_method}.py", f"{exp_log_dir}/{da_method}_script.py")
# copy("/home/mohamed/SLARADA/args.py", f"{exp_log_dir}/args.py")
copy_Files(exp_log_dir, data_type, da_method)
# loop through domains
# loop through domains
counter = 0
src_counter = 0
for src_id, tgt_id in product(*src_tgt_product):
# for src_id in ['a', 'b', 'c']:
# for tgt_id in ['a', 'b','c']:
if src_id != tgt_id:
# prepare save directory
# specify number of consecutive runs
for run_id in range(args.num_runs):
fix_randomness(run_id)
# Logging
log_dir = os.path.join(exp_log_dir, src_id + "_to_" + tgt_id + "_run_"+ str(run_id))
os.makedirs(log_dir, exist_ok=True)
log_file_name = os.path.join(log_dir, f"logs_{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log")
logger = _logger(log_file_name)
logger.debug("=" * 45)
logger.debug(f'Dataset: {data_type}')
logger.debug(f'Method: {da_method}')
logger.debug("=" * 45)
logger.debug(f'Source: {src_id} ---> Target: {tgt_id}')
logger.debug(f'Run ID: {run_id}')
logger.debug("=" * 45)
# Load datasets
src_train_dl, src_valid_dl, src_test_dl = data_generator(data_path, src_id, configs)
tgt_train_dl, tgt_valid_dl, tgt_test_dl = data_generator(data_path, tgt_id, configs)
if args.tensorboard:
wandb.init(project="SLARDA", group = f'{da_method}_{data_type}', name=f'{src_id}_to_{tgt_id}_run_{run_id}', config=configs,
sync_tensorboard=False, reinit=True, dir=r"./visualize/", )
source_model, target_model = cross_domain_train(src_train_dl, src_valid_dl, src_test_dl,
tgt_train_dl, tgt_valid_dl, base_model,
src_id, tgt_id,
device, logger, configs)
scores = cross_domain_test(source_model, target_model, src_id, tgt_id,
src_train_dl, tgt_train_dl, src_test_dl, tgt_test_dl,
device, log_dir, logger)
run_name = f"domain_{src_id}_run_{run_id}"
outs = (run_name,) + scores
simple_df.loc[counter] = outs
counter += 1
input_data = [f"{src_id}-->{tgt_id}"]
input_data.extend(simple_df.iloc[-args.num_runs:, 1:].mean().array)
input_data.extend(simple_df.iloc[-args.num_runs:, 1:].std().array)
mean_df.loc[src_counter] = input_data
src_counter += 1
# Printing and saving final results
print(simple_df.to_string())
print(mean_df.to_string())
printed_results = mean_df[['Scenario', 'Source_only_acc_mean', 'Source_only_acc_std', f'{da_method}_acc_mean', f'{da_method}_acc_std']]
mean = mean_df[['Source_only_acc_mean', 'Source_only_acc_std', f'{da_method}_acc_mean', f'{da_method}_acc_std']].mean()
printed_results.loc[len(printed_results)] = mean
printed_results.at[len(printed_results)-1, 'Scenario'] = 'Average'
logger.debug(f"Total training time is {datetime.now() - start_time}")
logger.debug('=' * 45)
logger.debug(f'Results using: {da_method}')
logger.debug('=' * 45)
logger.debug(mean_df.to_string())
logger.debug(printed_results.to_string())
print_res_name = os.path.basename(exp_log_dir)
simple_df.to_excel(f'{exp_log_dir}/full_res_results_{print_res_name}.xlsx')
printed_results.to_excel(f'{exp_log_dir}/printed_results_{print_res_name}.xlsx')
if args.tensorboard:
wandb.log({"Full_results": wandb.Table(dataframe=simple_df)})
wandb.log({"Printed_results": wandb.Table(dataframe=printed_results)})
if __name__ == "__main__":
wandb.config = configs
main_train_cd()
|
mohamedr002/SLARDA
|
Autorgressive_Adaptation/train_CD.py
|
train_CD.py
|
py
| 6,844 |
python
|
en
|
code
| 23 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "args.args.device",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "args.args",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "args.args.da_method",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "args.args",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "args.args.save_dir",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "args.args",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "args.args.selected_dataset",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "args.args",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "args.args.base_model",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "args.args",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "args.args.experiment_description",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "args.args",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "args.args.run_description",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "args.args",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "utils.get_nonexistant_path",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "utils.copy_Files",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "args.args.num_runs",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "args.args",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "utils.fix_randomness",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "utils._logger",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "dataloader.dataloader.data_generator",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "dataloader.dataloader.data_generator",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "args.args.tensorboard",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "args.args",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "wandb.init",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "trainer.training_evaluation.cross_domain_test",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "args.args.num_runs",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "args.args",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "args.args.num_runs",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "args.args",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "args.args.tensorboard",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "args.args",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "wandb.log",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "wandb.Table",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "wandb.log",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "wandb.Table",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "wandb.config",
"line_number": 149,
"usage_type": "attribute"
}
] |
31965379047
|
#!/usr/bin/env python3
import numpy as np
import urllib.request
import cv2
import binascii
import lorem
import math
import matplotlib.pyplot as plt
def encode_as_binary_array(msg):
"""Encode a message as a binary string."""
msg = msg.encode("utf-8")
msg = msg.hex()
msg = [msg[i:i + 2] for i in range(0, len(msg), 2)]
msg = [bin(int(el, base=16))[2:] for el in msg]
msg = ["0" * (8 - len(el)) + el for el in msg]
return "".join(msg)
def decode_from_binary_array(array):
"""Decode a binary string to utf8."""
array = [array[i:i+8] for i in range(0, len(array), 8)]
if len(array[-1]) != 8:
array[-1] = array[-1] + "0" * (8 - len(array[-1]))
array = [hex(int(el, 2))[2:].zfill(2) for el in array]
array = "".join(array)
result = binascii.unhexlify(array)
return result.decode("utf-8", errors="replace")
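# Round-trip sketch:
#   encode_as_binary_array("Hi")                    # -> "0100100001101001"
#   decode_from_binary_array("0100100001101001")    # -> "Hi"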
def hide_message(image, message, nbits=1):
"""Hide a message in an image (LSB).
nbits: number of least significant bits
"""
nbits = clamp(nbits, 1, 8)
shape = image.shape
image = np.copy(image).flatten()
if len(message) > len(image) * nbits:
raise ValueError("Message is to long :(")
chunks = [message[i:i + nbits] for i in range(0, len(message), nbits)]
for i, chunk in enumerate(chunks):
byte = str(bin(image[i]))[2:].zfill(8)
new_byte = byte[:-nbits] + chunk
image[i] = int(new_byte, 2)
return image.reshape(shape)
def clamp(n, minn, maxn):
"""Clamp the n value to be in range (minn, maxn)."""
return max(min(maxn, n), minn)
def reveal_message(image, nbits=1, length=0):
"""Reveal the hidden message.
nbits: number of least significant bits
length: length of the message in bits.
"""
nbits = clamp(nbits, 1, 8)
shape = image.shape
image = np.copy(image).flatten()
length_in_pixels = math.ceil(length/nbits)
if len(image) < length_in_pixels or length_in_pixels <= 0:
length_in_pixels = len(image)
message = ""
i = 0
while i < length_in_pixels:
byte = str(bin(image[i]))[2:].zfill(8)
message += byte[-nbits:]
i += 1
    mod = length % -nbits  # negative modulo gives the (non-positive) number of padding bits to trim
if mod != 0:
message = message[:mod]
return message
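# Hide/reveal round-trip sketch (assumes `image` is a uint8 numpy array):
#   bits = encode_as_binary_array("top secret")
#   stego = hide_message(image, bits, nbits=2)
#   decode_from_binary_array(reveal_message(stego, nbits=2, length=len(bits)))  # -> "top secret"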
print("Downloading image!")
path = 'https://picsum.photos/500/500'
resp = urllib.request.urlopen(path)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
print("Image downloaded!")
message = lorem.text()*1000
secret = encode_as_binary_array(message)
resultImageRow1 = None
resultImageRow2 = None
nbitsList = range(1, 9)
nbitsMSE = []
for nbits in nbitsList:
print(nbits)
imageSecret = hide_message(image, secret[:int(image.size*0.8)], nbits)
    mse = ((imageSecret.astype(np.float64) - image.astype(np.float64)) ** 2).mean()  # cast first: uint8 subtraction wraps around
nbitsMSE.append(mse)
if nbits <= 4:
resultImageRow1 = imageSecret if resultImageRow1 is None else np.hstack(
[resultImageRow1, imageSecret])
else:
resultImageRow2 = imageSecret if resultImageRow2 is None else np.hstack(
[resultImageRow2, imageSecret])
plt.plot(nbitsList, nbitsMSE)
plt.xlabel('nbits')
plt.ylabel('MSE')
cv2.namedWindow("Result", cv2.WINDOW_NORMAL)
cv2.imshow('Result', np.vstack([resultImageRow1, resultImageRow2]))
cv2.imwrite('ex2_encoded.png', np.vstack([resultImageRow1, resultImageRow2]))
cv2.waitKey(1)
plt.savefig('ex2_plot.png')
plt.show()
cv2.waitKey()
# For nbits=7,8 the MSE decreased, because a larger share of the base image remains visible
# - the message was written into fewer pixels
|
damiankoper/iobLab
|
ex2.py
|
ex2.py
|
py
| 3,584 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "binascii.unhexlify",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "cv2.imdecode",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_COLOR",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "lorem.text",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "cv2.namedWindow",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_NORMAL",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "cv2.waitKey",
"line_number": 125,
"usage_type": "call"
}
] |
9650271610
|
from tornado.web import RequestHandler
from tornado import gen  # gen lives in tornado.gen, not tornado.web
from controller import favoriteTopicController
import json
# Favorite (bookmark) a topic
class AddFavoriteTopic(RequestHandler):
@gen.coroutine
def post(self):
session_id = self.get_argument('session_id')
topicId = self.get_argument('topicId')
data = favoriteTopicController.createFavoriteTopic(session_id, topicId)
self.write(json.dumps(data))
# Remove a topic from favorites
class RemoveFavoriteTopic(RequestHandler):
@gen.coroutine
def post(self):
session_id = self.get_argument('session_id')
topicId = self.get_argument('topicId')
data = favoriteTopicController.removeFavoriteTopic(session_id, topicId)
self.write(json.dumps(data))
# Get the topics a user has favorited
class GetFavoriteTopic(RequestHandler):
@gen.coroutine
def post(self):
session_id = self.get_argument('session_id')
x = self.get_argument('x')
n = self.get_argument('n')
data = favoriteTopicController.retrieveFavoriteTopic(session_id, x, n)
self.write(json.dumps(data))
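# Minimal routing sketch (URL paths are illustrative, not taken from the original app):
#   import tornado.web
#   application = tornado.web.Application([
#       (r"/favoriteTopic/add", AddFavoriteTopic),
#       (r"/favoriteTopic/remove", RemoveFavoriteTopic),
#       (r"/favoriteTopic/get", GetFavoriteTopic),
#   ])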
|
zhuxiyulu/sugar
|
handlers/favoriteTopicHandler.py
|
favoriteTopicHandler.py
|
py
| 1,145 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "tornado.web.RequestHandler",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "controller.favoriteTopicController.createFavoriteTopic",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "controller.favoriteTopicController",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tornado.web.gen.coroutine",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "tornado.web.gen",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "tornado.web.RequestHandler",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "controller.favoriteTopicController.removeFavoriteTopic",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "controller.favoriteTopicController",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tornado.web.gen.coroutine",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tornado.web.gen",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "tornado.web.RequestHandler",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "controller.favoriteTopicController.retrieveFavoriteTopic",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "controller.favoriteTopicController",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tornado.web.gen.coroutine",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "tornado.web.gen",
"line_number": 27,
"usage_type": "name"
}
] |
71476890109
|
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import grad
import numpy as np
from torch.autograd import Variable
from collections import OrderedDict
class Discriminator(nn.Module):
def __init__(self, input_size, hidden_size, batch_size, dp_keep_prob):
super(Discriminator, self).__init__()
        # initialization of member variables
self.hidden_size = hidden_size
self.batch_size = batch_size
self.dp_keep_prob = dp_keep_prob
        self.lamda = 10  # gradient penalty coefficient (lambda)
        # Initialize hidden layers
self.hidden0 = nn.Sequential(
nn.Linear(input_size, hidden_size),
nn.ReLU(),
#nn.Dropout(1- dp_keep_prob)
)
self.hidden1 = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
#nn.Dropout(1- dp_keep_prob)
)
self.hidden2 = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
#nn.Dropout(1- dp_keep_prob)
)
self.out = nn.Sequential(
torch.nn.Linear(hidden_size, 1),
torch.nn.Sigmoid()
)
self.optimizer = optim.SGD(self.parameters(), lr=np.exp(-3))
def forward(self, inputs):
x = self.hidden0(inputs)
x = self.hidden1(x)
x = self.hidden2(x)
x = self.out(x)
return x
def train(self, x, y, type_loss = "None"):
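        # Note: this overrides nn.Module.train(), which normally toggles
        # training mode; here it performs a single optimization step instead.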
self.optimizer.zero_grad()
x_prediction = self.forward(x)
y_prediction = self.forward(y)
        loss = 0
if type_loss == "JSD":
loss = self.loss_JSD(x_prediction, y_prediction)
elif type_loss == "WD":
loss = self.loss_WD(x_prediction, y_prediction, self.Get_z_value(x,y, self.batch_size))
else :
loss = self.loss(x_prediction, y_prediction)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def loss(self, x_pred, y_pred):
return - (torch.mean(torch.log(x_pred)) + torch.mean(torch.log(1- y_pred)))
def loss_JSD(self, x_pred, y_pred):
return -(torch.log(torch.tensor([[2.]])) + 1/2*torch.mean(torch.log(x_pred)) + 1/2*torch.mean(torch.log((1- y_pred))))
def loss_WD(self, x_pred, y_pred, gradient_penalty):
return -(torch.mean(x_pred) - torch.mean(y_pred) - gradient_penalty)
def Get_z_value(self, x, y, size):
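        # WGAN-GP gradient penalty: interpolate between real (x) and fake (y)
        # samples, evaluate the critic there, and penalize critic gradient
        # norms that deviate from 1 (scaled by self.lamda).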
a = torch.empty(size,1).uniform_(0,1)
z_value = a*x+ (1-a)*y
z_value.requires_grad = True
out_interp = self.forward(z_value)
gradients = grad(outputs=out_interp, inputs=z_value,
grad_outputs=torch.ones(out_interp.size()),
retain_graph=True, create_graph=True, only_inputs=True)[0]
# Mean/Expectation of gradients
gradients = gradients.view(gradients.size(0), -1)
gradient_norm = gradients.norm(2, dim=1)
gradient_penalty = (self.lamda * torch.mean(((gradient_norm -1)**2)))
        gradient_penalty = Variable(gradient_penalty.data, requires_grad=True)
return gradient_penalty
|
gchafouleas/IFT6135_Assignment3
|
discriminator.py
|
discriminator.py
|
py
| 3,131 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Sigmoid",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.SGD",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "numpy.exp",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.log",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.log",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.empty",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.autograd.grad",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 88,
"usage_type": "call"
}
] |
72531832829
|
"""add cluster id in comp_runs
Revision ID: 83f9d2a470ef
Revises: dd8220be55ad
Create Date: 2021-08-31 20:02:45.709839+00:00
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "83f9d2a470ef"
down_revision = "dd8220be55ad"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("comp_runs", sa.Column("cluster_id", sa.BigInteger(), nullable=True))
op.create_foreign_key(
"fk_comp_runs_cluster_id_clusters",
"comp_runs",
"clusters",
["cluster_id"],
["id"],
onupdate="CASCADE",
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(
"fk_comp_runs_cluster_id_clusters", "comp_runs", type_="foreignkey"
)
op.drop_column("comp_runs", "cluster_id")
# ### end Alembic commands ###
|
ITISFoundation/osparc-simcore
|
packages/postgres-database/src/simcore_postgres_database/migration/versions/83f9d2a470ef_add_cluster_id_in_comp_runs.py
|
83f9d2a470ef_add_cluster_id_in_comp_runs.py
|
py
| 980 |
python
|
en
|
code
| 35 |
github-code
|
6
|
[
{
"api_name": "alembic.op.add_column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.BigInteger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op.create_foreign_key",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_constraint",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_column",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 37,
"usage_type": "name"
}
] |
22791755556
|
import sys
sys.path.insert(0, '../../class')
import os
import time
import nnet
import cubelattice as cl
import multiprocessing
from functools import partial
from scipy.io import loadmat
import numpy as np
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Verification Settings')
parser.add_argument('--property', type=str, default='1')
parser.add_argument('--n1', type=int, default=2)
parser.add_argument('--n2', type=int, default=3)
parser.add_argument('--compute_unsafety', action='store_true')
args = parser.parse_args()
i = args.n1
j = args.n2
    def verification(afv):
        # Placeholder safety check: always reports the result as safe.
        safe = True
        return safe
print("neural_network_"+str(i)+str(j))
nn_path = "nets/neural_network_information_"+str(i)+str(j)+".mat"
filemat = loadmat(nn_path)
if not os.path.isdir('logs'):
os.mkdir('logs')
W = filemat['W'][0]
b = filemat['b'][0]
lb = [-0.1,-0.1,-0.1]
ub = [0.1,0.1,0.1]
nnet0 = nnet.nnetwork(W, b)
nnet0.verification = verification
initial_input = cl.cubelattice(lb, ub).to_lattice()
cpus = multiprocessing.cpu_count()
pool = multiprocessing.Pool(cpus)
nnet0.start_time = time.time()
nnet0.filename = "logs/output_info"+str(i)+str(j)+'.txt'
outputSets = []
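    # The first layer is evaluated serially; its output sets are then
    # processed in parallel from layer 1 onward, one worker task per set.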
    inputSets0 = nnet0.singleLayerOutput(initial_input, 0)
    pool.map(partial(nnet0.layerOutput, m=1), inputSets0)
pool.close()
elapsed_time = time.time() - nnet0.start_time
print('time elapsed: %f seconds \n' % elapsed_time)
print('result: safe\n')
filex = open(nnet0.filename, 'w')
filex.write('time elapsed: %f seconds \n' % elapsed_time)
filex.write('result: safe\n')
filex.close()
|
Shaddadi/veritex
|
examples/Microbenchmarks/main.py
|
main.py
|
py
| 1,739 |
python
|
en
|
code
| 10 |
github-code
|
6
|
[
{
"api_name": "sys.path.insert",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "nnet.nnetwork",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cubelattice.cubelattice",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 54,
"usage_type": "call"
}
] |
22186680287
|
import os
from flask import Flask, render_template, request, redirect, url_for
from werkzeug.utils import secure_filename
from main import classify_image # imports function from main.py
app = Flask(__name__, static_url_path='/static')
app.config['UPLOAD_FOLDER'] = 'static/uploads'
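# Note: file.save() below assumes the upload folder already exists; creating
# it up front, e.g. os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True),
# would be a safe addition.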
@app.route('/')
def index():
return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
if 'image' not in request.files:
return redirect(request.url)
file = request.files['image']
if file.filename == '':
return redirect(request.url)
if file:
filename = secure_filename(file.filename)
file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(file_path)
model_path = "models/ensemble_model_v2.h5" # ensemble model
result = classify_image(file_path, model_path)
return render_template('result.html', result=result, image_path=url_for('static', filename='uploads/' + filename))
if __name__ == '__main__':
app.run() # debug=True to debug
|
princemexy/Cervical-cell-classification-using-various-Deep-Learning-Models
|
app.py
|
app.py
|
py
| 1,115 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request.files",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.request.url",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask.request.files",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.request.url",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "werkzeug.utils.secure_filename",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "main.classify_image",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 35,
"usage_type": "call"
}
] |
70329767867
|
import requests, re, os, json
import urllib.request
import urllib.parse
from bs4 import BeautifulSoup
class Check_Influences(object):
def __init__(self):
self.folder_data_name = '../../files/data'
self.json_name_link = '../../files/name_links.json'
self.good_json_file_name = '../../files/good_name_links.json'
self.wiki_base_url = 'https://en.wikipedia.org'
def bio_table(self, page):
# open url with bs
name = page.rsplit('/')[-1]
file = open(page)
page = file.read()
file.close()
soup = BeautifulSoup(page, "html.parser")
# get biography table
table = soup.find('table', class_='infobox biography vcard')
#print(len(table.find_all('ul', class_='NavContent')))
try:
# get influencers unordered list
influencers = table.find_all('ul', class_='NavContent')[0]
except:
influencers = []
try:
# get influenced unordered list
influenced = table.find_all('ul', class_='NavContent')[1]
except:
influenced = []
#print(influenced)
final_influencers = []
final_influenced = []
# We want a list of titles of wikipedia pages
if influencers != []:
for a in influencers.find_all('a'):
try:
# extract the title
final_influencers.append(a.get('title'))
except:
pass
# We want a list of titles of wikipedia pages
if influenced != []:
for a in influenced.find_all('a'):
try:
# extract the title
final_influenced.append(a.get('title'))
except:
pass
return name, final_influencers,final_influenced
def get_all_files(self):
return os.listdir(self.folder_data_name)
def get_content_of_file(self):
file = open(self.good_json_file_name)
content = json.load(file)
file.close()
return content
def has_infobox_table(self, file_name):
file = open(file_name)
file_content = file.read()
file.close()
if len(re.findall(r'infobox biography vcard', file_content)) != 0: return True
return False
def write_json_file(self, lista):
content = {"name_links":lista}
file = open(self.good_json_file_name, 'w')
json.dump(content, file, indent = 4)
file.close()
def get_good_cs(self):
all_files = self.get_all_files()
good_cs = []
for file_name in all_files:
file_name = self.folder_data_name + '/' + file_name
if self.has_infobox_table(file_name): good_cs.append(file_name)
return good_cs
def make_good_cs(self, good_cs):
to_write = []
for item in good_cs:
item_name = item.rsplit('/', 1)[-1]
item_link = self.wiki_base_url + '/' + item_name
to_write.append({item_name:item_link})
self.write_json_file(to_write)
def get_good_cs_files(self):
json_content = self.get_content_of_file()
name_file_list = []
for item in json_content['name_links']:
name = list(item.keys())[0]
name_file_list.append(self.folder_data_name + '/' + name)
return name_file_list
def check_influences(self, good_cs_filenames):
count = 0
for file_name in good_cs_filenames:
print(file_name)
#req = urllib.request.Request(, data)
file = open(file_name)
respData = file.read()
file.close()
paragraphs = re.findall(r'<table class="infobox(.*?)</table>', str(respData))
for eachP in paragraphs:
if "Influences" or "Influenced" in eachP:
r = re.findall(r'Influence(.*?)Influenced', str(eachP))
for e in r:
influenze = re.findall(r'title="(.*?)"', str(e))
for i in influenze:
print(file_name + "," + i)
p = re.findall(r'Influenced(.*?)colspan="2"', str(eachP))
for el in p:
influenzati = re.findall(r'title="(.*?)"', str(el))
for inf in influenzati:
print(inf + "," + file_name)
#print(count)
def main(self):
all_files = self.get_all_files()
good_cs = self.get_good_cs()
print("Good Computer Scientists: %d" % len(good_cs))
self.make_good_cs(good_cs)
good_cs_filenames = self.get_good_cs_files()
#self.check_influences(good_cs_filenames)
for i in good_cs_filenames:
print(self.bio_table(i))
if __name__ == '__main__':
Check_Influences_Obj = Check_Influences()
Check_Influences_Obj.main()
|
LucaTomei/Computer_Scientists
|
src/Second_Phase/check_influences.py
|
check_influences.py
|
py
| 4,271 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "bs4.BeautifulSoup",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 125,
"usage_type": "call"
}
] |
74796407546
|
import torchvision
from torch import nn
# train_dataset = torchvision.datasets.ImageNet(root="../dataset_ImageNet", transform=torchvision.transforms.ToTensor(),
# split='train', download=True)
vgg16 = torchvision.models.vgg16(pretrained=False)
vgg16_pretrain = torchvision.models.vgg16(pretrained=True)
print(vgg16_pretrain)
train_dataset = torchvision.datasets.CIFAR10(root="../dataset_CIFAR10", transform=torchvision.transforms.ToTensor(),
train=True, download=True)
vgg16_pretrain.classifier.add_module('add_linear', nn.Linear(1000, 10))  # model modification: append an extra layer
vgg16.classifier[6] = nn.Linear(4096, 10)  # model modification: replace an existing layer
print(vgg16)
|
ccbit1997/pytorch_learning
|
src/model_change.py
|
model_change.py
|
py
| 737 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torchvision.models.vgg16",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torchvision.models.vgg16",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
}
] |
7935267536
|
import random
import cv2
import numpy
import pygame
# A single particle
class Particle():
def __init__(self, rect, w):
self.rect = rect
self.w = w
self.dis = 0
self.hist = []
self.dx = 0
self.dy = 0
def update(self, pixelArray, width, height):
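        # Random-walk motion model: perturb the predicted center with
        # Gaussian noise (std = 5 px) and clamp the box to the screen bounds.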
self.rect.centerx = int(random.gauss(self.rect.centerx + self.dx, 5))
if self.rect.right >= width:
self.rect.right = width - 1
if self.rect.left < 0:
self.rect.left = 0
self.rect.centery = int(random.gauss(self.rect.centery + self.dy, 5))
if self.rect.bottom >= height:
self.rect.bottom = height - 1
if self.rect.top < 0:
self.rect.top = 0
bgr = pixelArray[self.rect.left:self.rect.right,
self.rect.top:self.rect.bottom, ::-1]
        # Compute H and S channel histograms in HSV space
hsv = cv2.cvtColor(bgr, cv2.COLOR_RGB2HSV)
targetH = cv2.calcHist([hsv], [0], None, [8], [0, 179])
targetS = cv2.calcHist([hsv], [1], None, [8], [0, 255])
_targetH = numpy.zeros((8, 1))
_targetS = numpy.zeros((8, 1))
_targetH = cv2.normalize(targetH, _targetH)
_targetS = cv2.normalize(targetS, _targetS)
self.hist = [_targetH, _targetS]
def get_dis(self, target_hist):
        # Smaller distance means higher similarity
disH = cv2.compareHist(target_hist[0], self.hist[0],
cv2.HISTCMP_BHATTACHARYYA)
disS = cv2.compareHist(target_hist[1], self.hist[1],
cv2.HISTCMP_BHATTACHARYYA)
self.dis = (disH + disS)
if self.dis == 0:
self.dis = 0.0001
return self.dis
def draw(self, screen):
pygame.draw.rect(screen, (255, 0, 0), self.rect,
1)
# Particle filter representing one target tracker
class Tracker():
count = 0
def __init__(self, rect, feature):
self.particle_num = 50
self.particles = []
self.rect = rect
self.last_pos = rect
self.feature = feature
        self.time_since_update = 0  # frames since the last successful match
        self.id = Tracker.count
        Tracker.count += 1
        self.history = []  # tracking history
        self.hits = 0  # number of successful matches
        self.hit_streak = 0  # consecutive successful matches
        self.age = 0  # number of frames tracked
self.dx = 0
self.dy = 0
for i in range(0, self.particle_num):
self.particles.append(Particle(rect.copy(),
1 / self.particle_num))
def update(self, rect, dx, dy):
        # Matched with a detection: reset particles around the detected box
for particle in self.particles:
particle.rect = rect.copy()
particle.w = 1 / self.particle_num
particle.dx = dx
particle.dy = dy
self.rect = rect
self.last_pos = rect
self.dx = dx
self.dy = dy
self.time_since_update = 0
self.history = []
self.hits += 1
self.hit_streak += 1
def predict(self, pixelArray, width, height):
        # weight normalization factor
        w = 0
        # effective number of particles (1 / sum of squared weights)
        N = 0
for particle in self.particles:
particle.update(pixelArray, width, height)
particle.w = 1 / particle.get_dis(self.feature) * particle.w
w += particle.w
for particle in self.particles:
particle.w = particle.w / w
N += particle.w ** 2
N = 1 / N
# print(N)
if N < 0.6 * self.particle_num:
self.resample()
self.get_pos()
        # Update tracker bookkeeping
self.age += 1
if self.time_since_update > 0:
self.hit_streak = 0
self.time_since_update += 1
self.history.append(self.rect)
def resample(self):
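        # Multinomial resampling, triggered in predict() when the effective
        # particle count falls below 60% of the population: particles are
        # duplicated in proportion to their weights.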
        # Draw uniform random numbers in [0, 1]
stage = [random.uniform(0, 1) for i in range(0, self.particle_num)]
sum = [0] * self.particle_num
s = 0
        # Build the cumulative weight ladder
for i in range(0, self.particle_num):
s += self.particles[i].w
sum[i] = s
        # Count how many random draws fall into each weight interval
times = [0] * self.particle_num
for i in range(0, self.particle_num):
j = 0
while j < self.particle_num:
if stage[i] > sum[j]:
j += 1
else:
times[j] += 1
break
cop = self.particles[:]
self.particles.clear()
        # Regenerate particles in proportion to their weights
for i in range(0, self.particle_num):
for j in range(0, times[i]):
self.particles.append(Particle(cop[i].rect.copy(),
1 / self.particle_num))
def get_pos(self):
x = 0
y = 0
for particle in self.particles:
x += particle.w * particle.rect.left
y += particle.w * particle.rect.top
self.rect = pygame.Rect(x, y, self.rect.width, self.rect.height)
def show_predict(self, screen):
pygame.draw.rect(screen, (255, 0, 0), self.rect, 3)
class Monitor():
def __init__(self, screen, ai_settings, max_age=1, min_hits=3):
self.screen = screen
self.ai_settings = ai_settings
self.pixelArray = None
self.trackers = []
self.detections = []
self.width, self.height = screen.get_size()
self.max_age = max_age
self.min_hits = min_hits
def get_background(self):
width, length = self.screen.get_size()
self.pixelArray = numpy.zeros((width, length, 3), dtype='uint8')
pygame.pixelcopy.surface_to_array(self.pixelArray, self.screen)
def backDiff(self):
        # Background subtraction against the stored background frame
self.detections.clear()
pixelArray = cv2.subtract(self.ai_settings.bg, self.pixelArray)
gray_bg = cv2.cvtColor(pixelArray, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray_bg, 10, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
y, x, h, w = cv2.boundingRect(c)
rect = pygame.Rect(x, y, w, h)
self.detections.append(rect)
def associate_detections_to_trackers(self, iou_threshold=0.3):
"""
Assigns detections to tracked object (both represented as bounding boxes)
Returns 3 lists of matches, unmatched_detections and unmatched_trackers
"""
if (len(self.trackers) == 0):
return numpy.empty((0, 2), dtype=int), numpy.arange(
len(self.detections)), numpy.empty((0, 5), dtype=int)
iou_matrix = numpy.zeros((len(self.detections),
len(self.trackers)))
for d, det in enumerate(self.detections):
for t, trk in enumerate(self.trackers):
iou_matrix[d, t] = self.iou(det, trk.rect)
if min(iou_matrix.shape) > 0:
a = (iou_matrix > iou_threshold).astype(numpy.int32)
if a.sum(1).max() == 1 and a.sum(0).max() == 1:
matched_indices = numpy.stack(numpy.where(a), axis=1)
else:
matched_indices = self.linear_assignment(-iou_matrix)
else:
matched_indices = numpy.empty(shape=(0, 2))
unmatched_detections = []
for d, det in enumerate(self.detections):
if (d not in matched_indices[:, 0]):
unmatched_detections.append(d)
unmatched_trackers = []
for t, trk in enumerate(self.trackers):
if (t not in matched_indices[:, 1]):
unmatched_trackers.append(t)
# filter out matched with low IOU
matches = []
for m in matched_indices:
if (iou_matrix[m[0], m[1]] < iou_threshold):
unmatched_detections.append(m[0])
unmatched_trackers.append(m[1])
else:
matches.append(m.reshape(1, 2))
if (len(matches) == 0):
matches = numpy.empty((0, 2), dtype=int)
else:
matches = numpy.concatenate(matches, axis=0)
return matches, numpy.array(unmatched_detections), numpy.array(
unmatched_trackers)
def update(self):
        # Propagate every tracker
for tracker in self.trackers:
tracker.predict(self.pixelArray, self.width, self.height)
        # Update trackers with the matching results
matched, unmatched_dets, unmatched_trks = \
self.associate_detections_to_trackers()
# update matched trackers with assigned detections
for m in matched:
rect = self.detections[m[0]]
dx = rect.centerx - self.trackers[m[1]].last_pos.centerx
dy = rect.centery - self.trackers[m[1]].last_pos.centery
self.trackers[m[1]].update(rect, dx, dy)
# create and initialise new trackers for unmatched detections
for i in unmatched_dets:
rect = self.detections[i]
bgr = self.pixelArray[rect.left:rect.right,
rect.top:rect.bottom, ::-1]
hsv = cv2.cvtColor(bgr, cv2.COLOR_RGB2HSV)
targetH = cv2.calcHist([hsv], [0],
None,
[8], [0, 179])
targetS = cv2.calcHist([hsv], [1],
None,
[8], [0, 255])
_targetH = numpy.zeros((8, 1))
_targetS = numpy.zeros((8, 1))
_targetH = cv2.normalize(targetH, _targetH)
_targetS = cv2.normalize(targetS, _targetS)
trk = Tracker(rect.copy(), [_targetH, _targetS])
self.trackers.append(trk)
i = len(self.trackers)
ret = []
for trk in reversed(self.trackers):
if (trk.time_since_update < 1) and trk.hit_streak >= self.min_hits:
ret.append(trk)
i -= 1
# remove dead tracklet
if (trk.time_since_update > self.max_age):
self.trackers.pop(i)
return ret
def show_predicts(self, ret):
for tracker in ret:
tracker.show_predict(self.screen)
def check_areas(self, ai_settings, screen, ship, area, bullets):
for particle_group in self.trackers:
target = particle_group.rect
if (target.centerx - area.rect.centerx) ** 2 + (
target.centery - area.rect.y) ** 2 < area.radius ** 2:
from game_manage.game_run import fire_bullet
fire_bullet(ai_settings, screen, ship, bullets,
(target.centerx, target.centery))
def linear_assignment(self, cost_matrix):
try:
import lap
_, x, y = lap.lapjv(cost_matrix, extend_cost=True)
return numpy.array([[y[i], i] for i in x if i >= 0]) #
except ImportError:
from scipy.optimize import linear_sum_assignment
x, y = linear_sum_assignment(cost_matrix)
return numpy.array(list(zip(x, y)))
def iou(self, detection, tracker):
"""
        Computes IOU between two bboxes in the form [x1,y1,x2,y2]
"""
bb_test = [detection.left, detection.top, detection.right - 1,
detection.bottom - 1]
bb_gt = [tracker.left, tracker.top, tracker.right - 1,
tracker.bottom - 1]
xx1 = numpy.maximum(bb_test[0], bb_gt[0])
yy1 = numpy.maximum(bb_test[1], bb_gt[1])
xx2 = numpy.minimum(bb_test[2], bb_gt[2])
yy2 = numpy.minimum(bb_test[3], bb_gt[3])
w = numpy.maximum(0., xx2 - xx1)
h = numpy.maximum(0., yy2 - yy1)
wh = w * h
o = wh / ((bb_test[2] - bb_test[0]) * (bb_test[3] - bb_test[1])
+ (bb_gt[2] - bb_gt[0]) * (bb_gt[3] - bb_gt[1]) - wh)
return o
|
2015211289/pygame
|
auto_review.py
|
auto_review.py
|
py
| 12,073 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "random.gauss",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "random.gauss",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2HSV",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "cv2.calcHist",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.calcHist",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.normalize",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.normalize",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cv2.compareHist",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "cv2.HISTCMP_BHATTACHARYYA",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "cv2.compareHist",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cv2.HISTCMP_BHATTACHARYYA",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "random.uniform",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "pygame.pixelcopy.surface_to_array",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "pygame.pixelcopy",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "cv2.subtract",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "cv2.findContours",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_EXTERNAL",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "cv2.boundingRect",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "numpy.stack",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2HSV",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "cv2.calcHist",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "cv2.calcHist",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "cv2.normalize",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "cv2.normalize",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "game_manage.game_run.fire_bullet",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "lap.lapjv",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "scipy.optimize.linear_sum_assignment",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "numpy.minimum",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "numpy.minimum",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 334,
"usage_type": "call"
}
] |
23759924215
|
import requests
import json
def test_shorten_new_url():
orig_url = "http://google.com"
resp = requests.post("http://localhost:8888/shorten", params={"orig_url":orig_url})
url = json.loads(resp.text).get("url")
resp = requests.post("http://localhost:8888", params={"short_url": url})
assert (resp.url.find("google") > 0)
def main():
test_shorten_new_url()
if __name__ == "__main__":
main()
|
anandjeyahar/urlshortener
|
test_url_shorten.py
|
test_url_shorten.py
|
py
| 421 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "requests.post",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 8,
"usage_type": "call"
}
] |
43969342716
|
#!/usr/bin/env python
import argparse
from Bio import SeqIO
def extract_starts(fasta):
codon_usage = {}
for record in SeqIO.parse(fasta, "fasta"):
seq = record.seq[0:3]
sseq = str(seq)
try: # If key exists, count += 1
codon_usage[sseq] = (codon_usage[sseq][0] + 1, seq)
except KeyError: # Else, create tuple (count, sequence)
codon_usage[sseq] = (1, seq)
for (sseq, (count, seq)) in sorted(codon_usage.items()):
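        # table=11 selects the NCBI bacterial/archaeal genetic code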
yield (sseq, codon_usage[sseq][0], seq.translate(table=11))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Summarise start codon usage", epilog=""
)
parser.add_argument("fasta", type=argparse.FileType("r"), help="Fasta Genome")
args = parser.parse_args()
print("# DNA\tCodon\tCount")
for (key, value, codon) in extract_starts(**vars(args)):
print("{}\t{}\t{}".format(key, codon, value))
|
TAMU-CPT/galaxy-tools
|
tools/fasta/start_stats.py
|
start_stats.py
|
py
| 950 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "Bio.SeqIO.parse",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 24,
"usage_type": "call"
}
] |
71504118267
|
from __future__ import annotations
from io import BufferedIOBase, BytesIO
from typing import List, Optional, Union
from helper import (
byte_to_int,
encode_varstr,
hash160,
int_to_byte,
int_to_little_endian,
little_endian_to_int,
read_varint,
sha256,
)
from op import (
decode_num,
encode_minimal_num,
is_number_op_code,
number_to_op_code,
op_code_to_number,
OP_0,
OP_CHECKLOCKTIMEVERIFY,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSEQUENCEVERIFY,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_DROP,
OP_DUP,
OP_EQUAL,
OP_EQUALVERIFY,
OP_FROMALTSTACK,
OP_HASH160,
OP_IF,
OP_NOTIF,
OP_PUSHDATA1,
OP_PUSHDATA2,
OP_TOALTSTACK,
OP_VERIFY,
OP_CODE_NAMES,
OP_CODE_FUNCTIONS,
)
from timelock import Locktime, Sequence
from witness import Witness
class Script(list):
def __add__(self, other: Script) -> Script:
return self.__class__(super().__add__(other))
def __radd__(self, other: Script) -> Script:
o = self.__class__(other)
return o + self
def __new__(cls,
                commands: Optional[List[Union[bytes, str]]] = None) -> Script:
if commands is None:
commands = []
for current in commands:
            if type(current) not in (bytes, str):
raise ValueError(
f'Every command should be bytes or str, got {current} instead'
)
return super().__new__(cls, commands)
def __repr__(self) -> str:
result = ''
for current in self:
if OP_CODE_NAMES.get(current):
result += f'{OP_CODE_NAMES[current]} '
elif type(current) == str:
result += f'<{current}> '
else:
result += f'{current.hex()} '
return result
@classmethod
def parse(cls, s: BufferedIOBase) -> Script:
# get the length of the entire field
length = read_varint(s)
# initialize the commands array
commands = []
# initialize the number of bytes we've read to 0
count = 0
# loop until we've read length bytes
while count < length:
# get the current byte
current = s.read(1)
# increment the bytes we've read
count += 1
# convert the current byte to an integer
current_int = current[0]
# if the current byte is between 1 and 75 inclusive
            if 1 <= current_int <= 75:
# add the next n bytes as a command
commands.append(s.read(current_int))
count += current_int
elif current == OP_PUSHDATA1:
# op_pushdata1
data_length = byte_to_int(s.read(1))
commands.append(s.read(data_length))
count += data_length + 1
elif current == OP_PUSHDATA2:
# op_pushdata2
data_length = little_endian_to_int(s.read(2))
commands.append(s.read(data_length))
count += data_length + 2
else:
# add the command to the list of commands
commands.append(current)
if count != length:
raise SyntaxError(f'parsing script failed {commands}')
return cls(commands)
def miniscript(self):
from miniscript import MiniScript
return MiniScript.from_script(Script(self[:]))
def is_locktime_locked(self) -> bool:
'''Returns whether the script starts with
<locktime> OP_CLTV OP_DROP'''
return len(self) >= 3 and \
(is_number_op_code(self[0]) or len(self[0]) > 1) and \
self[1] == OP_CHECKLOCKTIMEVERIFY and self[2] == OP_DROP
def is_multisig(self) -> bool:
'''Returns whether the script follows the
OP_k <pubkey1>...<pubkeyn> OP_n OP_CHECKMULTISIG pattern'''
if self[-1] != OP_CHECKMULTISIG:
return False
if not is_number_op_code(self[-2]):
return False
n = op_code_to_number(self[-2])
if len(self) < n + 3:
return False
for current in self[-n - 2:-2]:
if len(current) != 33:
return False
if not is_number_op_code(self[-n - 3]):
return False
k = op_code_to_number(self[-n - 3])
if k < 1 or k > 15:
return False
if n < k or n > 15:
return False
return True
def is_multisig_timelock(self) -> bool:
'''Returns whether the script follows the
<locktime> OP_CLTV/OP_CSV OP_DROP OP_k <pubkey1>...<pubkeyn> OP_n OP_CHECKMULTISIG pattern'''
return (self.is_sequence_locked() or self.is_locktime_locked()) and \
self.is_multisig()
def is_p2pkh(self) -> bool:
'''Returns whether the script follows the
OP_DUP OP_HASH160 <20 byte hash> OP_EQUALVERIFY OP_CHECKSIG pattern.'''
# there should be exactly 5 commands
# OP_DUP, OP_HASH160, 20-byte hash, OP_EQUALVERIFY, OP_CHECKSIG
return len(self) == 5 and self[0] == OP_DUP and self[1] == OP_HASH160 \
and len(self[2]) == 20 and self[3] == OP_EQUALVERIFY \
and self[4] == OP_CHECKSIG
def is_p2sh(self) -> bool:
'''Returns whether the script follows the
OP_HASH160 <20 byte hash> OP_EQUAL pattern.'''
# there should be exactly 3 commands
# OP_HASH160, 20-byte hash, OP_EQUAL
return len(self) == 3 and self[0] == OP_HASH160 and len(self[1]) == 20 \
and self[2] == OP_EQUAL
def is_p2wpkh(self) -> bool:
'''Returns whether the script follows the
OP_0 <20 byte hash> pattern.'''
return len(self) == 2 and self[0] == OP_0 and len(self[1]) == 20
def is_p2wsh(self) -> bool:
'''Returns whether the script follows the
OP_0 <32 byte hash> pattern.'''
return len(self) == 2 and self[0] == OP_0 and len(self[1]) == 32
def is_segwit(self) -> bool:
return self.is_p2wpkh() or self.is_p2wsh()
def is_sequence_locked(self) -> bool:
'''Returns whether the script starts with
<sequence> OP_CSV OP_DROP'''
return len(self) >= 3 and \
(is_number_op_code(self[0]) or len(self[0]) > 1) and \
self[1] == OP_CHECKSEQUENCEVERIFY and self[2] == OP_DROP
def is_timelock(self) -> bool:
'''Returns whether the script follows the
locktime OP_CLTV OP_DROP <pubkey> OP_CHECKSIG pattern'''
return (self.is_sequence_locked() or self.is_locktime_locked()) and \
len(self) == 5 and len(self[3]) == 33 and self[4] == OP_CHECKSIG
def pubkeys(self) -> List[bytes]:
pubkeys = []
for item in self:
if len(item) == 33 and item[0] in (2, 3):
pubkeys.append(item)
return pubkeys
def raw_serialize(self) -> bytes:
# initialize what we'll send back
result = b''
# go through each command
for current in self:
if current == OP_0:
result += int_to_byte(0)
elif OP_CODE_NAMES.get(current) is None:
# this is an element
# get the length in bytes
length = len(current)
# for large lengths, we have to use a pushdata op code
                if length <= 75:
# turn the length into a single byte integer
result += int_to_byte(length)
                elif length < 0x100:
# 76 is pushdata1
result += OP_PUSHDATA1
result += int_to_byte(length)
elif length >= 0x100 and length <= 520:
# 77 is pushdata2
result += OP_PUSHDATA2
result += int_to_little_endian(length, 2)
else:
raise ValueError('too long a command')
result += current
return result
def serialize(self) -> bytes:
return encode_varstr(self.raw_serialize())
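# Hypothetical usage sketch (not part of the original file): building and
# serializing a p2pkh ScriptPubKey from a 20-byte hash160 value:
#   script_pubkey = PKHScriptPubKey.from_hash(h160)
#   raw = script_pubkey.serialize()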
class ScriptPubKey(Script):
'''Represents a ScriptPubKey in a transaction'''
@classmethod
def parse(cls, s: BufferedIOBase) -> ScriptPubKey:
script_pubkey = super().parse(s)
if script_pubkey.is_p2pkh():
return PKHScriptPubKey.from_hash(script_pubkey[2])
elif script_pubkey.is_p2sh():
return SHScriptPubKey.from_hash(script_pubkey[1])
elif script_pubkey.is_p2wpkh():
return WPKHScriptPubKey.from_hash(script_pubkey[1])
elif script_pubkey.is_p2wsh():
return WSHScriptPubKey.from_hash(script_pubkey[1])
else:
return script_pubkey
def redeem_script(self) -> RedeemScript:
'''Convert this ScriptPubKey to its RedeemScript equivalent'''
return RedeemScript(self)
class PKHScriptPubKey(ScriptPubKey):
@classmethod
def from_hash(cls, h160: bytes) -> PKHScriptPubKey:
if len(h160) != 20:
raise TypeError('h160 should be 20 bytes')
return cls([OP_DUP, OP_HASH160, h160, OP_EQUALVERIFY, OP_CHECKSIG])
def hash160(self) -> bytes:
return self[2]
class SHScriptPubKey(ScriptPubKey):
@classmethod
def from_hash(cls, h160: bytes) -> SHScriptPubKey:
if len(h160) != 20:
raise TypeError('h160 should be 20 bytes')
return cls([OP_HASH160, h160, OP_EQUAL])
def hash160(self) -> bytes:
return self[1]
class RedeemScript(Script):
'''Subclass that represents a RedeemScript for p2sh'''
def hash160(self) -> bytes:
'''Returns the hash160 of the serialization of the RedeemScript'''
return hash160(self.raw_serialize())
def script_pubkey(self) -> SHScriptPubKey:
'''Returns the ScriptPubKey that this RedeemScript corresponds to'''
return SHScriptPubKey.from_hash(self.hash160())
class SegwitPubKey(ScriptPubKey):
def hash(self) -> bytes:
return self[1]
class WPKHScriptPubKey(SegwitPubKey):
@classmethod
def from_hash(cls, h160: bytes) -> WPKHScriptPubKey:
if len(h160) != 20:
raise TypeError('h160 should be 20 bytes')
return cls([OP_0, h160])
class WSHScriptPubKey(SegwitPubKey):
@classmethod
def from_hash(cls, s256: bytes) -> WSHScriptPubKey:
if len(s256) != 32:
raise TypeError('s256 should be 32 bytes')
return cls([OP_0, s256])
class WitnessScript(Script):
'''Subclass that represents a WitnessScript for p2wsh'''
def redeem_script(self) -> RedeemScript:
return self.script_pubkey().redeem_script()
def script_pubkey(self) -> WSHScriptPubKey:
'''Generates the ScriptPubKey for p2wsh'''
# get the sha256 of the current script
# return new p2wsh script using p2wsh_script
return WSHScriptPubKey.from_hash(self.sha256())
def sha256(self) -> bytes:
'''Returns the sha256 of the raw serialization for witness program'''
return sha256(self.raw_serialize())
class MultiSigScript(Script):
@classmethod
def from_pubkeys(cls, k: int, sec_pubkeys: List[bytes]) -> MultiSigScript:
n = len(sec_pubkeys)
if k == 0 or k > n:
raise ValueError(f'cannot do {k} of {n} keys')
return cls([
number_to_op_code(k), *sorted(sec_pubkeys),
number_to_op_code(n), OP_CHECKMULTISIG
])
class MultiSigRedeemScript(RedeemScript, MultiSigScript):
pass
class MultiSigWitnessScript(WitnessScript, MultiSigScript):
pass
class TimelockScript(Script):
@classmethod
def from_time(cls,
locktime: Optional[Locktime] = None,
sequence: Optional[Sequence] = None) -> List[bytes]:
if locktime is not None:
return [
encode_minimal_num(locktime), OP_CHECKLOCKTIMEVERIFY, OP_DROP
]
elif sequence is not None:
return [
encode_minimal_num(sequence), OP_CHECKSEQUENCEVERIFY, OP_DROP
]
else:
raise ValueError('locktime or sequence required')
class SingleSigTimelockScript(TimelockScript):
@classmethod
def from_pubkey_time(
cls,
sec: bytes,
locktime: Optional[Locktime] = None,
sequence: Optional[Sequence] = None) -> SingleSigTimelockScript:
script = cls.from_time(locktime, sequence) + [sec, OP_CHECKSIG]
return cls(script)
class SingleSigTimelockRedeemScript(RedeemScript, SingleSigTimelockScript):
pass
class SingleSigTimelockWitnessScript(WitnessScript, SingleSigTimelockScript):
pass
class MultiSigTimelockScript(TimelockScript, MultiSigScript):
@classmethod
def from_pubkeys_time(
cls,
k: int,
sec_pubkeys: List[bytes],
locktime: Optional[Locktime] = None,
sequence: Optional[Sequence] = None) -> MultiSigTimelockScript:
script = cls.from_time(locktime, sequence) + cls.from_pubkeys(
k, sec_pubkeys)
return cls(script)
class MultiSigTimelockRedeemScript(RedeemScript, MultiSigTimelockScript):
pass
class MultiSigTimelockWitnessScript(WitnessScript, MultiSigTimelockScript):
pass
|
jimmysong/minipy
|
script.py
|
script.py
|
py
| 13,382 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "typing.Optional",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "op.OP_CODE_NAMES.get",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "op.OP_CODE_NAMES",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "op.OP_CODE_NAMES",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "io.BufferedIOBase",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "helper.read_varint",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "op.OP_PUSHDATA1",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "helper.byte_to_int",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "op.OP_PUSHDATA2",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "helper.little_endian_to_int",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "miniscript.MiniScript.from_script",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "miniscript.MiniScript",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "{'MiniScript': 'miniscript.MiniScript'}",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "op.is_number_op_code",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "op.OP_CHECKLOCKTIMEVERIFY",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "op.OP_DROP",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "op.OP_CHECKMULTISIG",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "op.is_number_op_code",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "op.op_code_to_number",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "op.is_number_op_code",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "op.op_code_to_number",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "op.OP_DUP",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "op.OP_HASH160",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "op.OP_EQUALVERIFY",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "op.OP_CHECKSIG",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "op.OP_HASH160",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "op.OP_EQUAL",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "op.OP_0",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "op.OP_0",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "op.is_number_op_code",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "op.OP_CHECKSEQUENCEVERIFY",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "op.OP_DROP",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "op.OP_CHECKSIG",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "op.OP_0",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "helper.int_to_byte",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "op.OP_CODE_NAMES.get",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "op.OP_CODE_NAMES",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "helper.int_to_byte",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "op.OP_PUSHDATA1",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "helper.int_to_byte",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "op.OP_PUSHDATA2",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "helper.int_to_little_endian",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "helper.encode_varstr",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "io.BufferedIOBase",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "op.OP_DUP",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "op.OP_HASH160",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "op.OP_EQUALVERIFY",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "op.OP_CHECKSIG",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "op.OP_HASH160",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "op.OP_EQUAL",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "helper.hash160",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "op.OP_0",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "op.OP_0",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "helper.sha256",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 330,
"usage_type": "name"
},
{
"api_name": "op.number_to_op_code",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "op.number_to_op_code",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "op.OP_CHECKMULTISIG",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "timelock.Locktime",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "timelock.Sequence",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "op.encode_minimal_num",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "op.OP_CHECKLOCKTIMEVERIFY",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "op.OP_DROP",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "op.encode_minimal_num",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "op.OP_CHECKSEQUENCEVERIFY",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "op.OP_DROP",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "timelock.Locktime",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "timelock.Sequence",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "op.OP_CHECKSIG",
"line_number": 372,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 389,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 390,
"usage_type": "name"
},
{
"api_name": "timelock.Locktime",
"line_number": 390,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 391,
"usage_type": "name"
},
{
"api_name": "timelock.Sequence",
"line_number": 391,
"usage_type": "name"
}
] |
1445709781
|
from datetime import datetime
import pandas as pd
import numpy as np
import quandl
from arctic.exceptions import NoDataFoundException
from .base import BaseBacktestObject
from ..utils import keys
from ..utils.config import AdagioConfig
from ..utils.const import (FutureContractMonth, Denominator, PriceSkipDates,
ReturnSkipDates, FuturesInfo, RETURN_KEY_PRIORITY,
VOLUME_KEY_PRIORITY)
from ..utils.date import date_shift
from ..utils.logging import get_logger
from ..utils.mongo import get_library
from ..utils.quandl import futures_contract_name, futures_contract_month, year
logger = get_logger(name=__name__)
class QuandlGeneric(BaseBacktestObject):
def __init__(self, **backtest_params):
super(QuandlGeneric, self).__init__(**backtest_params)
class GenericInstrument(BaseBacktestObject):
def __init__(self, **backtest_params):
super(GenericInstrument, self).__init__(**backtest_params)
class TrueFXInstrument(BaseBacktestObject):
def __init__(self, **backtest_params):
super(TrueFXInstrument, self).__init__(**backtest_params)
class QuandlFutures(BaseBacktestObject):
def __init__(self, **backtest_params):
super(QuandlFutures, self).__init__(**backtest_params)
self.data = None
self.roll_date = None
self.position = None
self.is_expired = False
def __repr__(self):
return '{}({})'.format(self.__class__.__name__,
self[keys.quandl_ticker])
@property
def name(self):
""" Return quandl ticker """
return self[keys.quandl_ticker]
@property
def contract_name(self):
""" Return contract name such as ES for S&P500 mini futures """
return futures_contract_name(self[keys.quandl_ticker])
@property
def contract_month_code(self):
""" Return month string representing the delivery month """
return futures_contract_month(self[keys.quandl_ticker])
@property
def contract_month(self):
""" Return delivery month """
return FutureContractMonth[self.contract_month_code].value
@property
def contract_month_dt(self):
""" Return the beginning of the month of delivery """
return datetime(self.year, self.contract_month, 1).date()
@property
def year(self):
""" Return delivery year """
return year(self[keys.quandl_ticker])
@property
def price_for_return(self):
""" Return a series for returns """
return self.data[self.get_return_key()]
@property
def has_volume(self):
""" Return if data contains volume information """
try:
_ = self.get_volume_key()
return True
except ValueError:
return False
def get_final_positions(self):
""" Return final position (adjusted by signals etc.)
Trading lags are already applied so that the final return can be
calculated by final_position * returns.
"""
return self.position.prod(axis=1).rename('final_position')
def get_final_gross_returns(self):
""" Return final gross returns for its contract using final_positions
Any slippage is not deducted.
"""
return ((self.calc_return() * self.get_final_positions())
.rename('final_gross_returns'))
def get_final_net_returns(self):
""" Return final net returns for its contract using final_positions
Cost = tick size * slippage * trade amount / price_t
trade amount is proportional to the trading size and is at least 1.0
every time we trade.
"""
# trade_amount = 1 if there is a transaction
final_positions = self.get_final_positions()
trade_amount = (final_positions.diff()
.shift(-1) # trading lag is already added to positions
.fillna(0.0)
.abs())
# initial entry if the position is held from start
start_date = self.data.index[0]
trade_amount[start_date] = abs(final_positions[start_date])
cost = (self.price_for_return.pow(-1)
.replace([np.inf, -np.inf], np.nan)
.fillna(method='pad')
.mul(self[keys.tick_size] * self[keys.slippage])
.mul(trade_amount))
return ((self.get_final_gross_returns() - cost)
.rename('final_net_returns'))
def get_final_returns(self, is_gross=True):
if is_gross:
return self.get_final_gross_returns()
else:
return self.get_final_net_returns()
def backtest(self, start_date, end_date, *args, **kwargs):
""" Get data from Quandl and clean it. Positions are calculated
according to start_date and end_date (both including).
:param start_date:
:param end_date:
:return:
"""
logger.info('Run layers: {}'.format(self))
# load data
self.data = self.load_data()
self.data = self.clean_data()
self.roll_date = end_date
# determine base positions based on the roll date
logger.debug('Determining base positions')
self.position = pd.DataFrame(0.0, columns=['base'],
index=self.data.index)
self.position.loc[slice(start_date, end_date), 'base'] = 1.0
self[keys.start_date] = start_date
self[keys.end_date] = end_date
def check_if_expired(self, data):
""" Check if the contract is expired """
if data.index[-1] >= self.last_trade_date():
# if data contains the last trade date
self.is_expired = True
else:
today = datetime.today()
if data.index[-1] < date_shift(today, '-1y'):
# if data is very old the contract is assumed to be expired
self.is_expired = True
def load_from_quandl(self):
""" Download data from quandl """
logger.debug('Downloading data from Quandl')
data = quandl.get(self[keys.quandl_ticker],
api_key=AdagioConfig.quandl_token)
self.check_if_expired(data)
return data
def to_mongo(self, library, data):
""" Save data to MongoDB """
logger.debug('Pushing data to MongoDB')
library.write(self[keys.quandl_ticker], data)
def load_data(self):
""" Load data either from MongoDB stored locally """
library = get_library(keys.quandl_contract)
item = library.read(self[keys.quandl_ticker])
data = item.data
self.check_if_expired(data)
return data
def update_database(self):
""" Update local database by checking Quandl if they have the latest
data """
library = get_library(keys.quandl_contract)
try:
item = library.read(self[keys.quandl_ticker])
data = item.data
self.check_if_expired(data)
if not self.is_expired or self[keys.force_download]:
# re-download data. Quandl might have more recent data
data = self.load_from_quandl()
self.to_mongo(library, data)
else:
logger.debug('Load data from MongoDB')
except NoDataFoundException:
# if not found in MongoDB, then it tries to get data from Quandl
data = self.load_from_quandl()
self.to_mongo(library, data)
def get_date(self, shift_string):
""" Shift date from the delivery month-begin """
return date_shift(self.contract_month_dt, shift_string)
def last_trade_date(self):
return self.get_date(self[keys.last_trade_date])
def first_notice_date(self):
if self[keys.first_notice_date] is None:
raise ValueError('{} not found.'.format(keys.first_notice_date))
return self.get_date(self[keys.first_notice_date])
def get_roll_base_date(self):
""" Return roll base date from which the actual roll date is
calculated. This is either first notice date or last trade date. """
if self[keys.first_notice_date] is not None:
first_notice_date = self.first_notice_date()
last_trade_date = self.last_trade_date()
return min(first_notice_date, last_trade_date)
else:
return self.last_trade_date()
def get_roll_date(self, roll_rule):
""" Return roll date
If there is no first notice date, then the roll date is X-days before
its last trade date
If the contract has a setting for first notice date, then the roll date
is min(X-days before last trade date, X-days before first notice date)
"""
return date_shift(self.get_roll_base_date(), roll_rule)
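        # Example (roll_rule format assumed to follow date_shift, e.g. '-5d'):
        # with first notice date 2020-03-13 and last trade date 2020-03-20,
        # the roll base date is 2020-03-13, so get_roll_date('-5d')
        # returns 2020-03-08.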
def clean_data(self):
""" Clean erroneous dates """
cleaned_data = self.data
if self[keys.lo_ticker] in PriceSkipDates.__members__.keys():
dates = PriceSkipDates[self[keys.lo_ticker]].value
skip_dates = pd.DatetimeIndex(dates)
to_nan = [t for t in skip_dates if t in cleaned_data.index]
cleaned_data.loc[to_nan] = None
cleaned_data = (cleaned_data
.fillna(method='pad')
.fillna(method='bfill'))
# remove when all prices are zero or negative for some reason
flgs = self.price_for_return > 0
cleaned_data = (cleaned_data.where(flgs).fillna(method='pad')
.fillna(method='bfill'))
if self[keys.lo_ticker] in __fut_clean_func__.keys():
cleaned_data = __fut_clean_func__[self[keys.lo_ticker]](
cleaned_data)
return cleaned_data
def _trim_data(self):
""" Trim underlying data based on the backtest period """
period = slice(self[keys.backtest_start_date],
self[keys.backtest_end_date])
self.data = self.data.loc[period, :]
self.position = self.position.loc[period, :]
def get_return_key(self):
""" Return a column name used to be used for calculating returns """
for return_key in RETURN_KEY_PRIORITY:
if return_key in self.data.keys():
return return_key
raise ValueError('No return key found. Data contains {}'
.format(self.data.keys()))
def get_volume_key(self):
""" Return a column name used to be used for calculating returns """
for return_key in VOLUME_KEY_PRIORITY:
if return_key in self.data.keys():
return return_key
raise ValueError('No volume key found. Data contains {}'
.format(self.data.keys()))
def calc_return(self):
""" Calculate returns and clean it if necessary """
return_raw = self._calc_return_raw()
if self[keys.lo_ticker] in [i.name for i in ReturnSkipDates]:
fix_date = ReturnSkipDates[self[keys.lo_ticker]].value
for d in fix_date:
if d in return_raw.index:
return_raw[d] = 0.0
return_raw = self.convert_return_ccy(return_raw)
return return_raw
def _calc_return_raw(self):
""" Return raw returns according to the denominator.
(Fully-collateralised returns)
If denominator is not specified, returns are just percentage changes.
Other denominators follow the market conventions and/or the data format.
"""
if self[keys.denominator] is None:
base_price = self._get_base_price()
else:
if self[keys.denominator] == Denominator.GOVT_FUT.value:
base_price = self._get_base_price(100.0)
elif self[keys.denominator] == Denominator.MM_FUT.value:
base_price = self._get_base_price(100.0 * 0.25)
else:
raise ValueError("{} is not a valid denominator."
.format(self[keys.denominator]))
return self.price_for_return.diff().div(base_price.shift()).fillna(0)
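        # For example, a bond future moving from 135.50 to 135.75 yields
        # (135.75 - 135.50) / 100.0 = 0.25% under the GOVT_FUT convention,
        # whereas the MM_FUT convention divides by 100.0 * 0.25 = 25.0.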
def _get_base_price(self, constant=None):
""" Get base price series that will be used as a denominator
for return calculation """
if constant is not None:
base_price = pd.Series(constant, index=self.position.index)
else:
final_position = self.get_final_positions()
position_change = (final_position
.shift(-1)
.fillna(method='pad')
.diff()
.abs()
.pipe(np.sign))
# initial position
position_change = (position_change
.fillna(final_position.abs().pipe(np.sign)))
base_price = (self.price_for_return
.where(position_change == 1)
.fillna(method='pad'))
        # base_price is later shifted by 1 period when multiplied by
        # the price change
return base_price
def convert_return_ccy(self, returns):
""" Convert returns series into the backtest currency
This calculation assumes that the position is fully-collateralised
and the initial collateral is fully fx-hedged.
:param returns: returns series measured in the contract currency
:return:
"""
if self[keys.contract_ccy] == self[keys.backtest_ccy]:
return returns
else:
library = get_library(keys.fx_rates)
symbols = library.list_symbols(regex=self[keys.contract_ccy])
if len(symbols) > 1:
raise ValueError('Multiple fx rates found')
fx_rates = library.read(symbols[0])
fx_rates = fx_rates.data
if fx_rates.name == '{}/{}'.format(self[keys.contract_ccy],
self[keys.backtest_ccy]):
# use fx rates as is
pass
            elif fx_rates.name == '{}/{}'.format(self[keys.backtest_ccy],
                                                 self[keys.contract_ccy]):
                fx_rates = fx_rates.pow(-1)
            else:
                raise ValueError('Unexpected fx rates pair: {}'
                                 .format(fx_rates.name))
fx_adj = (fx_rates
.reindex(returns.index)
.fillna(method='pad')
.pct_change()
.fillna(0)
.add(1.0)
)
return (returns * fx_adj).rename(returns.name)
def _clean_jgb_prices(df):
df[:'2018-01-18'] *= 0.1
return df
__fut_clean_func__ = {
FuturesInfo.SGX_JB.name: _clean_jgb_prices
}
|
thoriuchi0531/adagio
|
adagio/layers/contract.py
|
contract.py
|
py
| 14,880 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "utils.logging.get_logger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "base.BaseBacktestObject",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "base.BaseBacktestObject",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "base.BaseBacktestObject",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "base.BaseBacktestObject",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "utils.keys.quandl_ticker",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "utils.keys.quandl_ticker",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "utils.quandl.futures_contract_name",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "utils.keys.quandl_ticker",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "utils.quandl.futures_contract_month",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "utils.keys.quandl_ticker",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "utils.const.FutureContractMonth",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "utils.quandl.year",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "utils.keys.quandl_ticker",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "numpy.inf",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "utils.keys.tick_size",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "utils.keys.slippage",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "utils.keys.start_date",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "utils.keys.end_date",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "utils.date.date_shift",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "quandl.get",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "utils.keys.quandl_ticker",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "utils.config.AdagioConfig.quandl_token",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "utils.config.AdagioConfig",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "utils.keys.quandl_ticker",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "utils.mongo.get_library",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "utils.keys.quandl_contract",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "utils.keys.quandl_ticker",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "utils.mongo.get_library",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "utils.keys.quandl_contract",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "utils.keys.quandl_ticker",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "utils.keys.force_download",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "arctic.exceptions.NoDataFoundException",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "utils.date.date_shift",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "utils.keys.last_trade_date",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "utils.keys.first_notice_date",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "utils.keys.first_notice_date",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "utils.keys.first_notice_date",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "utils.keys.first_notice_date",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "utils.date.date_shift",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "utils.keys.lo_ticker",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "utils.const.PriceSkipDates.__members__.keys",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "utils.const.PriceSkipDates.__members__",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "utils.const.PriceSkipDates",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "utils.const.PriceSkipDates",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "utils.keys.lo_ticker",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "pandas.DatetimeIndex",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "utils.keys.lo_ticker",
"line_number": 266,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "utils.keys.lo_ticker",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "utils.keys.backtest_start_date",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "utils.keys.backtest_end_date",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "utils.const.RETURN_KEY_PRIORITY",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "utils.const.VOLUME_KEY_PRIORITY",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "utils.keys.lo_ticker",
"line_number": 298,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "utils.const.ReturnSkipDates",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "utils.const.ReturnSkipDates",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "utils.keys.lo_ticker",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "utils.keys.denominator",
"line_number": 315,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "utils.keys.denominator",
"line_number": 318,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "utils.const.Denominator.GOVT_FUT",
"line_number": 318,
"usage_type": "attribute"
},
{
"api_name": "utils.const.Denominator",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "utils.keys.denominator",
"line_number": 320,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 320,
"usage_type": "name"
},
{
"api_name": "utils.const.Denominator.MM_FUT",
"line_number": 320,
"usage_type": "attribute"
},
{
"api_name": "utils.const.Denominator",
"line_number": 320,
"usage_type": "name"
},
{
"api_name": "utils.keys.denominator",
"line_number": 324,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 324,
"usage_type": "name"
},
{
"api_name": "pandas.Series",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "numpy.sign",
"line_number": 339,
"usage_type": "attribute"
},
{
"api_name": "numpy.sign",
"line_number": 342,
"usage_type": "attribute"
},
{
"api_name": "utils.keys.contract_ccy",
"line_number": 362,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 362,
"usage_type": "name"
},
{
"api_name": "utils.keys.backtest_ccy",
"line_number": 362,
"usage_type": "attribute"
},
{
"api_name": "utils.mongo.get_library",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "utils.keys.fx_rates",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 365,
"usage_type": "name"
},
{
"api_name": "utils.keys.contract_ccy",
"line_number": 366,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 366,
"usage_type": "name"
},
{
"api_name": "utils.keys.contract_ccy",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "utils.keys.backtest_ccy",
"line_number": 375,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "utils.keys.backtest_ccy",
"line_number": 378,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "utils.keys.contract_ccy",
"line_number": 379,
"usage_type": "attribute"
},
{
"api_name": "utils.keys",
"line_number": 379,
"usage_type": "name"
},
{
"api_name": "utils.const.FuturesInfo.SGX_JB",
"line_number": 398,
"usage_type": "attribute"
},
{
"api_name": "utils.const.FuturesInfo",
"line_number": 398,
"usage_type": "name"
}
] |
39399812817
|
import os
import sys
from functools import partial
from typing import List, Tuple
import numpy as np
import tensorflow as tf
from custom_utils import *
# ------------------------- Function for building cnn ------------------------ #
def build_cnn(filters_list: List[int],
conv2d_regularizer_decay: float,
dense_units_list: List[int],
dense_regularizer_decay: float,
kernel_size: int,
dropout_rate: float,
batch_norm_momentum: float,
learning_rate: float,
clipnorm: float,
              input_shape: Tuple[int, int, int] = (28, 28, 1)) -> tf.keras.models.Sequential:
"""
Build and compile a convolutional neural network with the following architecture:
    - 5 convolutional layers with ReLU activation (the first layer's kernel size is configurable; the rest use 3 x 3)
- 3 max pooling layers with 2 x 2 pool size
- 2 dense layers with ReLU activation
- 1 output layer with softmax activation
Parameters
----------
filters_list : List[int]
A list of integers representing the filter dimensions outputted by each convolutional layer
conv2d_regularizer_decay : float
L2 regularization decay for convolutional layers
dense_units_list : List[int]
A list of integers representing the number of units in each dense layer
dense_regularizer_decay : float
L2 regularization decay for dense layers
kernel_size : int
Size of the kernel for the first convolutional layer
dropout_rate : float
Dropout rate for the dropout layers
batch_norm_momentum : float
Momentum for the batch normalization layers
learning_rate : float
Learning rate for the Adam optimizer
clipnorm : float
Clipnorm for the Adam optimizer
input_shape : Tuple[int], optional
Dimension of the input feature vector, by default (28, 28, 1)
Returns
-------
tf.keras.models.Sequential
A compiled convolutional neural network
"""
# Default convolutional layer
DefaultConv2D = partial(
tf.keras.layers.Conv2D,
kernel_size=3,
padding='same',
activation='relu',
kernel_initializer='he_normal',
kernel_regularizer=tf.keras.regularizers.l2(conv2d_regularizer_decay)
)
# Default dense layer
DefaultDense = partial(
tf.keras.layers.Dense,
activation='relu',
kernel_initializer='he_normal',
kernel_regularizer=tf.keras.regularizers.l2(dense_regularizer_decay)
)
# Model architecture
cnn_model = tf.keras.Sequential([
# First convolutional layer can have larger kernel size (more than 3 x 3)
DefaultConv2D(filters=filters_list[0], kernel_size=kernel_size, input_shape=input_shape),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
DefaultConv2D(filters=filters_list[1]),
DefaultConv2D(filters=filters_list[2]),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
DefaultConv2D(filters=filters_list[3]),
DefaultConv2D(filters=filters_list[4]),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
# The Dense layer expects a 1D array of features for each instance, so we need to flatten its inputs
tf.keras.layers.Flatten(),
DefaultDense(units=dense_units_list[0]),
tf.keras.layers.BatchNormalization(momentum=batch_norm_momentum),
tf.keras.layers.Dropout(dropout_rate),
DefaultDense(units=dense_units_list[1]),
tf.keras.layers.BatchNormalization(momentum=batch_norm_momentum),
tf.keras.layers.Dropout(dropout_rate),
DefaultDense(units=10, activation='softmax')
])
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, clipnorm=clipnorm)
cnn_model.compile(
        # Used when labels are a 1D integer vector rather than one-hot encoded
loss=tf.keras.losses.sparse_categorical_crossentropy,
optimizer=optimizer,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy')]
)
return cnn_model
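# Usage sketch (the hyperparameter values below are hypothetical; the entry
# point further down wires the real ones from parsed CLI arguments):
# model = build_cnn(filters_list=[32, 64, 64, 128, 128],
#                   conv2d_regularizer_decay=1e-4,
#                   dense_units_list=[256, 128],
#                   dense_regularizer_decay=1e-4,
#                   kernel_size=5,
#                   dropout_rate=0.4,
#                   batch_norm_momentum=0.9,
#                   learning_rate=1e-3,
#                   clipnorm=1.0)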
if __name__ == '__main__':
logger = get_logger(__name__)
args = parser()
# ------------------------------ Data ingestion ------------------------------ #
data_handler = DataHandler(args.s3_bucket, args.s3_key)
X_train, y_train = data_handler.load_data(mode='train')
X_val, y_val = data_handler.load_data(mode='val')
    logger.info(f'Successfully loaded training set with shape {X_train.shape} and validation set with shape {X_val.shape}')
# --------------------------- Build and train model -------------------------- #
cnn_model = build_cnn(
filters_list=[args.filter_dim_1, args.filter_dim_2, args.filter_dim_3, args.filter_dim_4, args.filter_dim_5],
conv2d_regularizer_decay=args.conv2d_regularizer_decay,
dense_units_list=[args.dense_units_1, args.dense_units_2],
dense_regularizer_decay=args.dense_regularizer_decay,
kernel_size=args.kernel_size,
dropout_rate=args.dropout_rate,
batch_norm_momentum=args.batch_norm_momentum,
learning_rate=args.learning_rate,
clipnorm=args.clipnorm
)
early_stopper = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=10, restore_best_weights=True)
cnn_model.fit(
x=X_train,
y=y_train,
batch_size=args.batch_size,
epochs=args.epochs,
callbacks=[early_stopper],
validation_data=(X_val, y_val),
verbose=2
)
logger.info(f'Best validation accuracy: {early_stopper.best}')
# Save model, a version number is needed for the TF serving container to load the model
cnn_model.save(os.path.join(args.model_dir, '00000000'))
|
YangWu1227/python-for-machine-learning
|
neural_network/projects/cnn_mnist_classification_sagemaker/src/train_entry.py
|
train_entry.py
|
py
| 5,786 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.regularizers.l2",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "functools.partial",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.regularizers.l2",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.Sequential",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.MaxPool2D",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.MaxPool2D",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.MaxPool2D",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Flatten",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.BatchNormalization",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dropout",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.BatchNormalization",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dropout",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.optimizers.Adam",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.metrics.SparseCategoricalAccuracy",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.callbacks.EarlyStopping",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 148,
"usage_type": "attribute"
}
] |
25070637475
|
import graphene
from graphene.types import generic
from graphene_django.rest_framework.serializer_converter import (
convert_serializer_to_input_type as serializer_to_input,
)
from purplship.server.core import serializers
from purplship.server.serializers import make_fields_optional, exclude_id_field
import purplship.server.graph.serializers as model_serializers
import purplship.server.graph.utils as utils
def create_address_input(partial: bool = False) -> graphene.InputObjectType:
_method = "Partial" if partial else ""
_type = (
make_fields_optional(model_serializers.AddressModelSerializer)
if partial
else exclude_id_field(model_serializers.AddressModelSerializer)
)
return type(
f"{_method}Address",
(serializer_to_input(_type),),
dict(
country_code=utils.CountryCodeEnum(required=not partial),
),
)
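# For example, create_address_input() builds an "Address" input type mirroring
# AddressModelSerializer with its id field excluded, while
# create_address_input(partial=True) builds "PartialAddress" with every field
# optional; the factories below follow the same pattern.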
def create_commodity_input(partial: bool = False) -> graphene.InputObjectType:
_method = "Partial" if partial else ""
_type = (
make_fields_optional(model_serializers.CommodityModelSerializer)
if partial
else exclude_id_field(model_serializers.CommodityModelSerializer)
)
return type(
f"{_method}Commodity",
(serializer_to_input(_type),),
dict(
parent_id=graphene.String(required=False),
weight_unit=utils.WeightUnitEnum(required=False),
origin_country=utils.CountryCodeEnum(required=False),
value_currency=utils.CurrencyCodeEnum(required=False),
metadata=generic.GenericScalar(),
),
)
def create_payment_input() -> graphene.InputObjectType:
return type(
"PartialPayment",
(serializer_to_input(serializers.Payment),),
dict(
paid_by=utils.PaidByEnum(required=False),
),
)
def create_duty_input() -> graphene.InputObjectType:
return type(
"PartialDuty",
(serializer_to_input(serializers.Duty),),
dict(
paid_by=utils.PaidByEnum(required=False),
currency=utils.CurrencyCodeEnum(required=False),
bill_to=graphene.Field(UpdateAddressInput, required=False),
),
)
def create_customs_input(partial: bool = False) -> graphene.InputObjectType:
_method = "Partial" if partial else ""
_type = (
make_fields_optional(model_serializers.CustomsModelSerializer)
if partial
else model_serializers.CustomsModelSerializer
)
return type(
f"{_method}Customs",
(serializer_to_input(_type),),
dict(
commodities=graphene.List(
UpdateCommodityInput if partial else CreateCommodityInput
),
incoterm=utils.IncotermCodeEnum(required=False),
content_type=utils.CustomsContentTypeEnum(required=False),
duty=graphene.Field(DutyInput, required=False),
options=generic.GenericScalar(),
),
)
def create_parcel_input(partial: bool = False) -> graphene.InputObjectType:
_method = "Partial" if partial else ""
_type = (
make_fields_optional(model_serializers.ParcelModelSerializer)
if partial
else exclude_id_field(model_serializers.ParcelModelSerializer)
)
return type(
f"{_method}Parcel",
(serializer_to_input(_type),),
dict(
items=graphene.List(
UpdateCommodityInput if partial else CreateCommodityInput
),
weight_unit=utils.WeightUnitEnum(required=False),
dimension_unit=utils.DimensionUnitEnum(required=False),
),
)
def create_label_template_input(partial: bool = False) -> graphene.InputObjectType:
_method = "Partial" if partial else ""
_type = (
make_fields_optional(model_serializers.LabelTemplateModelSerializer)
if partial
else exclude_id_field(model_serializers.LabelTemplateModelSerializer)
)
return type(
f"{_method}LabelTemplate",
(serializer_to_input(_type),),
dict(
template_type=utils.LabelTypeEnum(required=False),
),
)
def create_service_level_input(partial: bool = False) -> graphene.InputObjectType:
_method = "Partial" if partial else ""
_type = (
make_fields_optional(model_serializers.ServiceLevelModelSerializer)
if partial
else exclude_id_field(model_serializers.ServiceLevelModelSerializer)
)
return type(
f"{_method}ServiceLevel",
(serializer_to_input(_type),),
dict(
weight_unit=utils.WeightUnitEnum(required=False),
dimension_unit=utils.DimensionUnitEnum(required=False),
),
)
def create_connection_input(partial: bool = False) -> graphene.InputObjectType:
_method = "Update" if partial else "Create"
_fields = dict()
for name, serializer in model_serializers.CARRIER_MODEL_SERIALIZERS.items():
_extra_fields = dict()
_serializer = make_fields_optional(serializer) if partial else serializer
if hasattr(_serializer.Meta.model, "label_template"):
_extra_fields["label_template"] = graphene.Field(
UpdateLabelTemplateInput if partial else CreateLabelTemplateInput,
required=False,
)
if hasattr(_serializer.Meta.model, "services"):
_extra_fields["services"] = graphene.List(
UpdateServiceLevelInput if partial else CreateServiceLevelInput,
required=False,
)
_input = type(
f"{_method}{_serializer.__name__}",
(serializer_to_input(_serializer),),
_extra_fields,
)
_field = type(
f"{_method}{serializer.__name__}",
(_input,),
dict(
carrier_id=graphene.String(required=not partial),
metadata=generic.GenericScalar(),
),
)
_fields.update(
{
name: graphene.Field(_field, required=False),
}
)
return type("Settings", (object,), _fields)
PaymentInput = create_payment_input()
CreateCommodityInput = create_commodity_input()
UpdateCommodityInput = create_commodity_input(partial=True)
CreateAddressInput = create_address_input()
UpdateAddressInput = create_address_input(partial=True)
DutyInput = create_duty_input()
CreateCustomsInput = create_customs_input()
UpdateCustomsInput = create_customs_input(partial=True)
CreateParcelInput = create_parcel_input()
UpdateParcelInput = create_parcel_input(partial=True)
CreateAddressTemplateInput = type("CreateAddressTemplate", (CreateAddressInput,), {})
UpdateAddressTemplateInput = type("UpdateAddressTemplate", (UpdateAddressInput,), {})
CreateCustomsTemplateInput = type("CreateCustomsTemplate", (CreateCustomsInput,), {})
UpdateCustomsTemplateInput = type("UpdateCustomsTemplate", (UpdateCustomsInput,), {})
CreateParcelTemplateInput = type("CreateParcelTemplate", (CreateParcelInput,), {})
UpdateParcelTemplateInput = type("UpdateParcelTemplate", (UpdateParcelInput,), {})
CreateLabelTemplateInput = create_label_template_input()
UpdateLabelTemplateInput = create_label_template_input(partial=True)
CreateServiceLevelInput = create_service_level_input()
UpdateServiceLevelInput = create_service_level_input(partial=True)
CreateConnectionInput = create_connection_input()
UpdateConnectionInput = create_connection_input(partial=True)
|
danh91/purplship
|
server/modules/graph/purplship/server/graph/extension/base/inputs.py
|
inputs.py
|
py
| 7,522 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "purplship.server.serializers.make_fields_optional",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.serializers.AddressModelSerializer",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "purplship.server.serializers.exclude_id_field",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.serializers.AddressModelSerializer",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "graphene_django.rest_framework.serializer_converter.convert_serializer_to_input_type",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils.CountryCodeEnum",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "graphene.InputObjectType",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.serializers.make_fields_optional",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.serializers.CommodityModelSerializer",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "purplship.server.serializers.exclude_id_field",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.serializers.CommodityModelSerializer",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "graphene_django.rest_framework.serializer_converter.convert_serializer_to_input_type",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "graphene.String",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils.WeightUnitEnum",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "purplship.server.graph.utils.CountryCodeEnum",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "purplship.server.graph.utils.CurrencyCodeEnum",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "graphene.types.generic.GenericScalar",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "graphene.types.generic",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "graphene.InputObjectType",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "graphene_django.rest_framework.serializer_converter.convert_serializer_to_input_type",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.Payment",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.core.serializers",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "purplship.server.graph.utils.PaidByEnum",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "graphene.InputObjectType",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "graphene_django.rest_framework.serializer_converter.convert_serializer_to_input_type",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.Duty",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.core.serializers",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "purplship.server.graph.utils.PaidByEnum",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "purplship.server.graph.utils.CurrencyCodeEnum",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "graphene.Field",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "graphene.InputObjectType",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.serializers.make_fields_optional",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.serializers.CustomsModelSerializer",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "purplship.server.graph.serializers.CustomsModelSerializer",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "graphene_django.rest_framework.serializer_converter.convert_serializer_to_input_type",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "graphene.List",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils.IncotermCodeEnum",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "purplship.server.graph.utils.CustomsContentTypeEnum",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "graphene.Field",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "graphene.types.generic.GenericScalar",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "graphene.types.generic",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "graphene.InputObjectType",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.serializers.make_fields_optional",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.serializers.ParcelModelSerializer",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "purplship.server.serializers.exclude_id_field",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.serializers.ParcelModelSerializer",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "graphene_django.rest_framework.serializer_converter.convert_serializer_to_input_type",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "graphene.List",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils.WeightUnitEnum",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "purplship.server.graph.utils.DimensionUnitEnum",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "graphene.InputObjectType",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.serializers.make_fields_optional",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.serializers.LabelTemplateModelSerializer",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "purplship.server.serializers.exclude_id_field",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.serializers.LabelTemplateModelSerializer",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "graphene_django.rest_framework.serializer_converter.convert_serializer_to_input_type",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils.LabelTypeEnum",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "graphene.InputObjectType",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.serializers.make_fields_optional",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.serializers.ServiceLevelModelSerializer",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "purplship.server.serializers.exclude_id_field",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.serializers.ServiceLevelModelSerializer",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "graphene_django.rest_framework.serializer_converter.convert_serializer_to_input_type",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils.WeightUnitEnum",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "purplship.server.graph.utils.DimensionUnitEnum",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.utils",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "graphene.InputObjectType",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers.CARRIER_MODEL_SERIALIZERS.items",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "purplship.server.graph.serializers.CARRIER_MODEL_SERIALIZERS",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.graph.serializers",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "purplship.server.serializers.make_fields_optional",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "graphene.Field",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "graphene.List",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "graphene_django.rest_framework.serializer_converter.convert_serializer_to_input_type",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "graphene.String",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "graphene.types.generic.GenericScalar",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "graphene.types.generic",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "graphene.Field",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "graphene.InputObjectType",
"line_number": 152,
"usage_type": "attribute"
}
] |
3901687808
|
from copy import copy
import pygame
from pygame.math import Vector2, Vector3
from pygame.locals import *
import pygame_gui
from pygame_gui.elements.ui_text_box import UITextBox
from utils import *
from cube import Cube
from gold import Gold
from chest import Chest
from box import Box
from direction import Direction
from levels.lv import *
class Game:
def __init__(self) -> None:
self.level = None
self.hero = None
self.ui_manager = None
self.clock = pygame.time.Clock()
self.resolution_screen = (320, 240)
self.resolution_window = (640, 480)
self.surface_window = pygame.display.set_mode(self.resolution_window)
self.surface_screen = pygame.Surface(self.resolution_screen)
self.camera = Vector2(0, 0)
# init GUI
self.ui_manager = pygame_gui.UIManager(
self.resolution_screen, "data/themes/classic.json"
)
self.hud_textbox = UITextBox(
"",
pygame.Rect((0, 0), (320, 35)),
manager=self.ui_manager,
object_id="#hud_textbox",
)
self.msg_textbox = UITextBox(
"",
pygame.Rect((0, 180), (320, 60)),
manager=self.ui_manager,
object_id="#msg_textbox",
)
self.msg_textbox.hide()
if __debug__:
self.debug_textbox = UITextBox(
"",
pygame.Rect((0, 35), (320, 35)),
manager=self.ui_manager,
object_id="#debug_textbox",
)
def draw(self):
# draw
self.surface_screen.fill((0, 0, 0))
self.level.draw(self.camera, self.surface_screen)
hud_text = f"{self.hero.gold}G"
self.hud_textbox.html_text = hud_text
self.hud_textbox.rebuild()
# debug
if __debug__:
debug_text = f"{self.level.size} {self.hero.position} | {int(self.clock.get_fps())} fps"
self.debug_textbox.html_text = debug_text
self.debug_textbox.rebuild()
# draw lines around drawables
for drawable in self.level.drawables:
bl, br, tl, tr = list(
map(
(lambda coord: cartesian_to_isometric(coord)),
drawable.get_coords(),
)
)
# adjust all points with camera and z position
points = list(
map(
(
lambda point: (
point.x + self.camera.x,
point.y
+ self.camera.y
- drawable.position.z
+ Cube.SIZE,
)
),
[bl, br, br, tr, tl, tr, tl, bl],
)
)
pygame.draw.lines(
self.surface_screen,
(255, 255, 255),
False,
points,
)
# Top
points = list(
map(
(
lambda point: (
point.x + self.camera.x,
point.y
+ self.camera.y
- drawable.position.z
- drawable.size.z
+ Cube.SIZE,
)
),
[bl, br, br, tr, tl, tr, tl, bl],
)
)
pygame.draw.lines(
self.surface_screen,
(255, 255, 255),
False,
points,
)
self.ui_manager.draw_ui(self.surface_screen)
def update_display(self):
scaled_win = pygame.transform.scale(
self.surface_screen, self.surface_window.get_size()
)
self.surface_window.blit(scaled_win, (0, 0))
pygame.display.update()
def hero_on_ground(self):
[bl, br, tl, tr] = self.hero.get_coords()
return not (
self.not_solid(self.get_at(bl.x, bl.y, (self.hero.position.z - 1)))
and self.not_solid(self.get_at(br.x, br.y, (self.hero.position.z - 1)))
and self.not_solid(self.get_at(tl.x, tl.y, (self.hero.position.z - 1)))
and self.not_solid(self.get_at(tr.x, tr.y, (self.hero.position.z - 1)))
)
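        # i.e. sample the tile one unit below each corner of the hero's
        # bounding box; the hero is grounded as soon as any sample is solid.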
def events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
pygame.quit()
quit()
if event.key == pygame.K_LEFT:
self.hero.is_moving = True
self.hero.direction = Direction.LEFT
if event.key == pygame.K_RIGHT:
self.hero.is_moving = True
self.hero.direction = Direction.RIGHT
if event.key == pygame.K_UP:
self.hero.is_moving = True
self.hero.direction = Direction.UP
if event.key == pygame.K_DOWN:
self.hero.is_moving = True
self.hero.direction = Direction.DOWN
if __debug__ and event.key == pygame.K_d:
sorted_drawables = sorted(
self.level.cubes + self.level.drawables,
key=lambda drawable: drawable.zindex,
)
for drawable in sorted_drawables:
print(drawable)
if event.key == pygame.K_SPACE:
if self.hero.on_ground:
self.hero.jump = True
# action button
if event.key == pygame.K_RETURN:
# interact in front of hero
x = self.hero.position.x
y = self.hero.position.y
z = self.hero.position.z
                    # offset the probe point one step in front of the hero
if self.hero.direction == Direction.UP:
y -= 8
elif self.hero.direction == Direction.RIGHT:
x += 24
elif self.hero.direction == Direction.DOWN:
y += 24
elif self.hero.direction == Direction.LEFT:
x -= 8
box = Box(Vector3(x, y, z), Vector3(8, 8, 8))
for drawable in self.level.drawables:
if __debug__:
print(f"check interact with {drawable}")
if box.intersect(drawable):
if isinstance(drawable, Chest) and not drawable.is_open:
drawable.open()
elif (
isinstance(drawable, NPC)
and drawable.on_interact is not None
):
if self.hero.direction == Direction.LEFT:
drawable.direction = Direction.RIGHT
elif self.hero.direction == Direction.UP:
drawable.direction = Direction.DOWN
elif self.hero.direction == Direction.RIGHT:
drawable.direction = Direction.LEFT
elif self.hero.direction == Direction.DOWN:
drawable.direction = Direction.UP
drawable.on_interact()
elif event.type == KEYUP:
if event.key in [
pygame.K_LEFT,
pygame.K_RIGHT,
pygame.K_UP,
pygame.K_DOWN,
]:
self.hero.is_moving = False
def get_at(self, x, y, z):
index_x = x // Cube.SIZE
index_y = y // Cube.SIZE
index_z = z // Cube.SIZE
cube = self.level.get_cube(index_x, index_y, index_z)
if cube is not None:
return cube
for drawable in self.level.drawables:
if drawable.intersect_Vector3(Vector3(x, y, z)):
return drawable
return None
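        # Example: get_at(17, 9, 0) first looks up the cube grid cell
        # (17 // Cube.SIZE, 9 // Cube.SIZE, 0), then falls back to scanning
        # drawables whose volume contains the point.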
    def not_solid(self, drawable):
        return drawable is None or not drawable.solid
def update_camera(self):
self.camera = cartesian_to_isometric(
(self.hero.position.x, self.hero.position.y)
)
self.camera.x = self.resolution_screen[0] // 2 - self.camera.x
self.camera.y = (
self.resolution_screen[1] // 2 - self.camera.y + self.hero.position.z
)
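        # Net effect: the hero's cartesian position is projected to isometric
        # screen space, then offset so the hero stays centred, compensating
        # for altitude (z) so jumps do not scroll the view.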
def update(self):
time_delta = self.clock.tick(60) / 1000.0
self.hero.on_ground = self.hero_on_ground()
for drawable in self.level.drawables:
drawable.update_zindex()
self.events()
        # check collectibles (iterate over a copy so items can be removed safely)
        for drawable in list(self.level.drawables):
            if self.hero == drawable:
                continue
            if self.hero.intersect(drawable):
                if isinstance(drawable, Gold):
                    self.hero.gold += drawable.amount
                    self.level.drawables.remove(drawable)
        # check events
        for event in self.level.events:
            if self.hero.intersect(event):
                event.on_intersect()
# update hero location
if self.hero.is_moving:
next_pos = copy(self.hero.position)
if self.hero.direction == Direction.UP:
next_pos.y -= 1
if next_pos.y >= 0:
at_top_left = self.get_at(next_pos.x, next_pos.y, next_pos.z)
at_top_right = self.get_at(
next_pos.x + self.hero.size.x, next_pos.y, next_pos.z
)
if self.not_solid(at_top_left) and self.not_solid(at_top_right):
self.hero.position = copy(next_pos)
self.update_camera()
elif self.hero.direction == Direction.RIGHT:
next_pos.x += 1
if next_pos.x + self.hero.size.x < self.level.size.x * Cube.SIZE:
at_top_right = self.get_at(
next_pos.x + self.hero.size.x, next_pos.y, next_pos.z
)
at_btm_right = self.get_at(
next_pos.x + self.hero.size.x,
next_pos.y + self.hero.size.y,
next_pos.z,
)
if self.not_solid(at_top_right) and self.not_solid(at_btm_right):
self.hero.position = copy(next_pos)
self.update_camera()
elif self.hero.direction == Direction.DOWN:
next_pos.y += 1
if next_pos.y + self.hero.size.y < self.level.size.y * Cube.SIZE:
at_btm_left = self.get_at(
next_pos.x, next_pos.y + self.hero.size.y, next_pos.z
)
at_btm_right = self.get_at(
next_pos.x + self.hero.size.x,
next_pos.y + self.hero.size.y,
next_pos.z,
)
if self.not_solid(at_btm_left) and self.not_solid(at_btm_right):
self.hero.position = copy(next_pos)
self.update_camera()
elif self.hero.direction == Direction.LEFT:
next_pos.x -= 1
if next_pos.x > 0:
at_top_left = self.get_at(next_pos.x, next_pos.y, next_pos.z)
at_btm_left = self.get_at(
next_pos.x,
next_pos.y + self.hero.size.y,
next_pos.z,
)
if self.not_solid(at_top_left) and self.not_solid(at_btm_left):
self.hero.position = copy(next_pos)
self.update_camera()
# jump
        if self.hero.jump:
if self.hero.jump_cur >= self.hero.jump_max:
self.hero.jump = False
self.hero.jump_cur = 0
else:
self.hero.position.z += 1
self.hero.jump_cur += 1
self.update_camera()
# gravity
        if not self.hero.jump and not self.hero.on_ground:
self.hero.position.z -= 1
self.update_camera()
self.ui_manager.update(time_delta)
|
odrevet/isometric-map
|
game.py
|
game.py
|
py
| 13,216 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.time.Clock",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pygame.math.Vector2",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pygame_gui.UIManager",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pygame_gui.elements.ui_text_box.UITextBox",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame_gui.elements.ui_text_box.UITextBox",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pygame_gui.elements.ui_text_box.UITextBox",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "cube.Cube.SIZE",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "pygame.draw.lines",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube.SIZE",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "pygame.draw.lines",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_q",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction.LEFT",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction.RIGHT",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "pygame.K_UP",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction.UP",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "pygame.K_DOWN",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction.DOWN",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "pygame.K_d",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RETURN",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction.UP",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "direction.Direction.RIGHT",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "direction.Direction.DOWN",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "direction.Direction.LEFT",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "box.Box",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "pygame.math.Vector3",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "box.intersect",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "chest.Chest",
"line_number": 206,
"usage_type": "argument"
},
{
"api_name": "direction.Direction.LEFT",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "direction.Direction.RIGHT",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "direction.Direction.UP",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "direction.Direction.DOWN",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "direction.Direction.RIGHT",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "direction.Direction.LEFT",
"line_number": 217,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "direction.Direction.DOWN",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "direction.Direction.UP",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_UP",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_DOWN",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube.SIZE",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "cube.Cube.SIZE",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "cube.Cube.SIZE",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "pygame.math.Vector3",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "gold.Gold",
"line_number": 276,
"usage_type": "argument"
},
{
"api_name": "copy.copy",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "direction.Direction.UP",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "copy.copy",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "direction.Direction.RIGHT",
"line_number": 307,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "cube.Cube.SIZE",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "copy.copy",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "direction.Direction.DOWN",
"line_number": 324,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 324,
"usage_type": "name"
},
{
"api_name": "cube.Cube.SIZE",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "cube.Cube",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "copy.copy",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "direction.Direction.LEFT",
"line_number": 341,
"usage_type": "attribute"
},
{
"api_name": "direction.Direction",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "copy.copy",
"line_number": 353,
"usage_type": "call"
}
] |
72496131069
|
import json
import logging
import os
import requests
from django.http import JsonResponse
from django.shortcuts import redirect
from dotenv import load_dotenv
from rest_framework import status, permissions, response, views
from rest_framework_simplejwt import tokens
from rest_framework_simplejwt.tokens import RefreshToken
from main.utils import responseErrorMessage
from accounts.models import UserData, FizUser, YurUser, Role
from accounts.serializers import FizUserSerializer, YurUserSerializer
logger = logging.getLogger(__name__)
load_dotenv()
class OneIDLoginView(views.APIView):
permission_classes = ()
def get(self, request):
response_type = 'one_code'
client_id = os.getenv("CLIENT_ID")
redirect_uri = request.GET.get('path') + '/code'
scope = os.getenv("SCOPE")
state = os.getenv("STATE")
base_url = os.getenv("BASE_URL")
url = base_url + '?response_type=' + response_type + '&client_id=' + client_id + '&redirect_uri=' + redirect_uri + '&scope=' + scope + '&state=' + state
return redirect(url)
class LoginView(views.APIView):
permission_classes = ()
def post(self, request):
grant_type = 'one_authorization_code'
client_id = os.getenv("CLIENT_ID")
client_secret = os.getenv('CLIENT_SECRET')
redirect_uri = request.META.get('HTTP_X_PATH') + '/code'
code = request.META.get('HTTP_X_AUTH')
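# Exchange the one-time authorization code from the headers for OneID tokens.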
res = requests.post(os.getenv("BASE_URL"), {
'grant_type': grant_type,
'client_id': client_id,
'client_secret': client_secret,
'redirect_uri': redirect_uri,
'code': code
})
return JsonResponse(json.loads(res.content))
class GetUser(views.APIView):
permission_classes = ()
def kiril2latin(self, text):
host = os.getenv("MATN_UZ_HOST")
token = os.getenv("MATN_UZ_TOKEN")
url = host + '/latin'
response = requests.post(url, headers={'Authorization': 'Bearer ' + token}, data={'text': text})
return response.json()
def post(self, request):
is_client = request.data.get("is_client", None)
if is_client is None:
responseErrorMessage(message="is_client is required", status_code=status.HTTP_400_BAD_REQUEST)
grant_type = 'one_access_token_identify'
client_id = os.getenv("CLIENT_ID")
client_secret = os.getenv('CLIENT_SECRET')
access_token = request.META.get("HTTP_X_AUTHENTICATION")
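# Ask OneID to resolve the access token into the user's identity details.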
scope = os.getenv("SCOPE")
base_url = os.getenv("BASE_URL")
res = requests.post(base_url, {
"grant_type": grant_type,
"client_id": client_id,
"client_secret": client_secret,
"access_token": access_token,
"scope": scope
})
if res.status_code == 200:
# data = json.loads(res.content)
# if data['legal_info']:
# username = data['legal_info'][0]['tin']
# password = data['first_name'][0] + data['legal_info'][0]['tin'] + data['first_name'][-1]
# else:
# username = data['pin']
# password = data['first_name'][0] + data['pin'] + data['first_name'][-1]
data = res.json()
logger.info("96 oneId_data_user_type: ", data.get("user_type", "Empty"))
logger.info("97 oneId_data: ", data)
if data.get('legal_info'):
username = data['legal_info'][0]['tin']
else:
username = data['pin']
first_name = data['first_name']
password = f"{first_name[0]}{username}{first_name[-1]}"
# Write the newly signed-in user's details into the UserData table
# if UserData.objects.filter(username=username).exists():
# user = UserData.objects.get(username=username)
# else:
# if int(is_client):
# client_role = Role.objects.get(name='mijoz')
# else:
# client_role = None
# if data['legal_info']:
# user = UserData.objects.create_user(username=username, password=password, type=2, role=client_role)
# else:
# user = UserData.objects.create_user(username=username, password=password, type=1, role=client_role)
# print(data)
try:
user = UserData.objects.get(username=username)
except UserData.DoesNotExist:
client_role = Role.objects.get(name=Role.RoleNames.CLIENT) if int(is_client) else None
user_type = 2 if data['legal_info'] else 1
user = UserData.objects.create_user(
username=username,
password=password,
type=user_type,
role=client_role
)
if not int(is_client) and user.status_action == 1:
user.status_action = 2
user.save()
# if (user.status_action != 3 or str(user.role.name).lower() == "mijoz") and not is_client:
# responseErrorMessage(message="you are not employee", status_code=status.HTTP_400_BAD_REQUEST)
data['userdata'] = user.id
data['pport_issue_date'] = json.loads(res.content)['_pport_issue_date']
data['pport_expr_date'] = json.loads(res.content)['_pport_expr_date']
data['ctzn'] = self.kiril2latin(data['ctzn'])
data['per_adr'] = self.kiril2latin(data.get('per_adr'))
data['pport_issue_place'] = self.kiril2latin(data['pport_issue_place'])
data['natn'] = self.kiril2latin(data['natn'])
if data['legal_info']:
# Write the newly signed-in user's details into the YurUser table
data['name'] = data['legal_info'][0]['acron_UZ']
data['tin'] = data['legal_info'][0]['tin']
if not YurUser.objects.filter(userdata=user).exists():
userr = YurUserSerializer(data=data)
userr.is_valid(raise_exception=True)
userr.save()
else:  # was a bare pass; update the existing record instead
userr = YurUserSerializer(YurUser.objects.get(userdata=user), data=data, partial=True)
userr.is_valid(raise_exception=True)
userr.save()
else:
# Write the newly signed-in user's details into the FizUser table
if not FizUser.objects.filter(userdata=user).exists():
userr = FizUserSerializer(data=data)
userr.is_valid(raise_exception=True)
userr.save()
else:  # was a bare pass; update the existing record instead
userr = FizUserSerializer(FizUser.objects.get(userdata=user), data=data, partial=True)
userr.is_valid(raise_exception=True)
userr.save()
token = tokens.RefreshToken.for_user(user)
if user.role:
role = user.role.name
else:
role = None
tk = {
"access": str(token.access_token),
"refresh": str(token),
"role": role
}
return JsonResponse(tk)
else:
return JsonResponse({"error": "Xatolik!"})
class Logout(views.APIView):
permission_classes = (permissions.IsAuthenticated,)
def post(self, request):
grant_type = 'one_log_out'
client_id = os.getenv("CLIENT_ID")
client_secret = os.getenv('CLIENT_SECRET')
access_token = request.META.get("HTTP_X_AUTHENTICATION")
scope = os.getenv("SCOPE")
base_url = os.getenv("BASE_URL")
refresh_token = request.data["refresh_token"]
requests.post(base_url, {
'grant_type': grant_type,
'client_id': client_id,
'client_secret': client_secret,
'access_token': access_token,
'scope': scope
})
token = RefreshToken(refresh_token)
token.blacklist()
return response.Response(status=205)
|
Rahmet97/TestProjectBackend
|
oneid/views.py
|
views.py
|
py
| 8,211 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.views",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "os.getenv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.views",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "os.getenv",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.views",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "os.getenv",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "rest_framework.response",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.json",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "rest_framework.response",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "main.utils.responseErrorMessage",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "os.getenv",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "accounts.models.UserData.objects.get",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "accounts.models.UserData.objects",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.UserData",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "accounts.models.UserData.DoesNotExist",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.UserData",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "accounts.models.Role.objects.get",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "accounts.models.Role.objects",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.Role",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "accounts.models.Role.RoleNames",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.UserData.objects.create_user",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "accounts.models.UserData.objects",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.UserData",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "accounts.models.YurUser.objects.filter",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "accounts.models.YurUser.objects",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.YurUser",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "accounts.serializers.YurUserSerializer",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "accounts.serializers.YurUserSerializer",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "accounts.models.YurUser.objects.get",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "accounts.models.YurUser.objects",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.YurUser",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "accounts.models.FizUser.objects.filter",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "accounts.models.FizUser.objects",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.FizUser",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "accounts.serializers.FizUserSerializer",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "accounts.serializers.FizUserSerializer",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "accounts.models.FizUser.objects.get",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "accounts.models.FizUser.objects",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.FizUser",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "rest_framework_simplejwt.tokens.RefreshToken.for_user",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "rest_framework_simplejwt.tokens.RefreshToken",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "rest_framework_simplejwt.tokens",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.views",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.IsAuthenticated",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.permissions",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "os.getenv",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "rest_framework_simplejwt.tokens.RefreshToken",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "rest_framework.response",
"line_number": 207,
"usage_type": "name"
}
] |
26284510734
|
"""Makes POST requests to the openai API to find three main topics relating to the article title"""
import re
from datetime import datetime
import json
import pandas as pd
CURRENT_TIMESTAMP = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
JSON_FILE = 'response.json'
def read_response_json() -> dict:
"""Extracts openai response data from json file."""
with open(JSON_FILE, 'r') as f:
response_list = json.load(f)
return response_list
def get_story_topics(response_list: dict) -> list[dict]:
"""Extracts dictionaries containing each story id and associated topics from the openai response."""
valid_stories = []
all_stories_data = response_list['choices'][0]['message']['content']
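# Pull out each "{id: [topics]}" fragment from the model's reply; unparsable fragments are skipped below.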
stories_dict = re.findall(r'\{(.*?)\}', all_stories_data)
for story in stories_dict:
try:
topic_split = story.split(':')
valid_stories.append(
{int(topic_split[0]): eval(topic_split[1])})
except Exception:
continue
return valid_stories
def create_topic_csv(valid_stories: list[dict], table: str, id: str) -> None:
"""Stores media story topics in a csv file"""
response_df = pd.DataFrame(
columns=[id, 'topic_one', 'topic_two', 'topic_three'])
for story_dict in valid_stories:
for story_id, story_topics in story_dict.items():
if isinstance(story_topics, list):
while len(story_topics) < 3:
story_topics.append("UNTAGGED")
new_row = {id: story_id,
'topic_one': story_topics[0], 'topic_two': story_topics[1], 'topic_three': story_topics[2]}
response_df.loc[len(response_df)] = new_row
response_df.to_csv(f'{table}.csv', index=False)
def create_reddit_topic_csv(valid_stories: list[dict]) -> None:
"""Stores reddit story topics in a csv file"""
response_df = pd.DataFrame(
columns=['re_article_id', 'topic_one', 'topic_two', 'topic_three'])
for story_dict in valid_stories:
for story_id, story_topics in story_dict.items():
if isinstance(story_topics, list):
while len(story_topics) < 3:
story_topics.append("UNTAGGED")
new_row = {'re_article_id': story_id,
'topic_one': story_topics[0], 'topic_two': story_topics[1], 'topic_three': story_topics[2]}
response_df.loc[len(response_df)] = new_row
response_df.to_csv('reddit.csv', index=False)
|
IADesai/media-sentiment
|
tagging_pipeline/transform.py
|
transform.py
|
py
| 2,522 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 54,
"usage_type": "call"
}
] |
8530766170
|
from application import app, db
from flask import render_template, request, redirect, url_for
from flask_login import login_required
from flask_user import roles_required
from application.vehicles.models import Vehicle
from application.vehicles.forms import VehicleForm
@app.route("/vehicles/new/")
@roles_required('ADMIN')
def vehicles_form():
return render_template("vehicles/new.html", form=VehicleForm())
@app.route("/vehicles/", methods=["GET"])
@login_required
def vehicles_index():
vehicles = Vehicle.query.all()
return render_template("vehicles/list.html", vehicles=vehicles)
@app.route("/vehicles/", methods=["POST"])
@roles_required('ADMIN')
def vehicles_create():
form = VehicleForm(request.form)
if not form.validate():
return render_template("vehicles/new.html", form=form)
plate = form.plate.data
name = form.nickname.data
make = form.make.data
model = form.model.data
year = form.year.data
kms = form.kilometers.data
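# Persist the new vehicle; the positional arguments must match the Vehicle model's constructor order.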
v = Vehicle(plate, name, make, model, year, kms)
db.session().add(v)
db.session().commit()
return redirect(url_for('vehicles_index'))
|
skoskipaa/VehicleLogs
|
application/vehicles/views.py
|
views.py
|
py
| 1,144 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.render_template",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "application.vehicles.forms.VehicleForm",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "application.app.route",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "application.app",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask_user.roles_required",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "application.vehicles.models.Vehicle.query.all",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "application.vehicles.models.Vehicle.query",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "application.vehicles.models.Vehicle",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "application.app.route",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "application.app",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "flask_login.login_required",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "application.vehicles.forms.VehicleForm",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "application.vehicles.models.Vehicle",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "application.db.session",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "application.db",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "application.db.session",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "application.db",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "application.app.route",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "application.app",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "flask_user.roles_required",
"line_number": 24,
"usage_type": "call"
}
] |
22416441261
|
import numpy as np
import math
from gridmap2d import *
from bresenham_algorithm import *
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Circle
from matplotlib.animation import ArtistAnimation
def pose2transform(pose):
"""
@brief convert (x, y , yaw) to transform matrix
@param pose: (x, y, yaw)
@return: 3*3 transform matrix
"""
transform = np.identity(3)
transform[0:2, 2] = pose[0:2]
transform[0, 0] = math.cos(pose[2])
transform[0, 1] = -math.sin(pose[2])
transform[1, 0] = math.sin(pose[2])
transform[1, 1] = math.cos(pose[2])
return transform
def bresenham_line_by_world(world_map, A, B, prob):
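# Rasterize the world-coordinate segment AB into grid cells and write the given occupancy probability into each cell.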
a = world_map.world2pixel(A)
b = world_map.world2pixel(B)
res = bresenham_line(a, b)
for item in res:
world_map.set_prob_by_pixel(item, prob)
def draw_circle_test(world_map):
C = np.array([6, 6, 1]) / world_map.resolution
res = bresenham_circle(C)
for item in res:
world_map.set_prob_by_pixel(item, world_map.probrange[1])
# c(0,0)
# /\
# / \
# / \
# ------
#d(-0.6,-0.2) e(-0.6, 0.2)
def construct_robot_in_gridmap(pose):
"""
@brief draw robot
@param pose: (x, y, yaw) should be a numpy vector
"""
transform = pose2transform(pose)
c = np.array([0.0, 0.0, 1.0])
d = np.array([-0.6, -0.2, 1.0])
e = np.array([-0.6, 0.2, 1.0])
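# Map the triangle's homogeneous corner points from the robot frame into the world frame.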
c = transform @ c
d = transform @ d
e = transform @ e
bresenham_line_by_world(world_map, e, d, world_map.probrange[1])
bresenham_line_by_world(world_map, c, d, world_map.probrange[1])
bresenham_line_by_world(world_map, c, e, world_map.probrange[1])
def construct_home(world_map):
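# Draw the outer walls and interior partitions of a simple "home" floor plan with Bresenham lines (world coordinates, shifted by 1 to stay inside the map).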
A = np.array([0, 0]) + 1
B = np.array([0, 10]) + 1
bresenham_line_by_world(world_map, A, B, world_map.probrange[1])
A = np.array([0, 10]) + 1
B = np.array([8, 10]) + 1
bresenham_line_by_world(world_map, A, B, world_map.probrange[1])
A = np.array([8, 10]) + 1
B = np.array([8, 0]) + 1
bresenham_line_by_world(world_map, A, B, world_map.probrange[1])
A = np.array([8, 0]) + 1
B = np.array([0, 0]) + 1
bresenham_line_by_world(world_map, A, B, world_map.probrange[1])
A = np.array([4, 0]) + 1
B = np.array([4, 5]) + 1
bresenham_line_by_world(world_map, A, B, world_map.probrange[1])
A = np.array([4, 8]) + 1
B = np.array([4, 10]) + 1
bresenham_line_by_world(world_map, A, B, world_map.probrange[1])
A = np.array([3, 6]) + 1
B = np.array([3, 8]) + 1
bresenham_line_by_world(world_map, A, B, world_map.probrange[1])
A = np.array([0, 5]) + 1
B = np.array([3, 5]) + 1
bresenham_line_by_world(world_map, A, B, world_map.probrange[1])
A = np.array([5, 5]) + 1
B = np.array([8, 5]) + 1
bresenham_line_by_world(world_map, A, B, world_map.probrange[1])
A = np.array([0, 8]) + 1
B = np.array([3, 8]) + 1
bresenham_line_by_world(world_map, A, B, world_map.probrange[1])
ell1 = Ellipse(xy = (0.0, 0.0), width = 4, height = 8, angle = 30.0, facecolor= 'yellow', alpha=0.3)
print(ell1.get_gid())
world_map = gridmap2d((10, 12), 0.02, (-20, 120))
images = []
fig, ax = plt.subplots()
construct_home(world_map)
pose = np.array([3, 3, math.pi / 2])
construct_robot_in_gridmap(pose)
draw_circle_test(world_map)
im = plt.imshow(world_map.mapdata, cmap=plt.cm.get_cmap('hot'), animated=True,
vmin=world_map.probrange[0], vmax=world_map.probrange[1])
fig.colorbar(im)
images.append([im])
ani = ArtistAnimation(fig, images, interval=1, blit=True,
repeat_delay=10)
plt.show()
|
democheng/PythonRobotics
|
SLAM/test.py
|
test.py
|
py
| 3,620 |
python
|
en
|
code
| 15 |
github-code
|
6
|
[
{
"api_name": "numpy.identity",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.Ellipse",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cm.get_cmap",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.animation.ArtistAnimation",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 113,
"usage_type": "name"
}
] |
20240957628
|
"""
This file contains the definition of the SMPL model
"""
from __future__ import division, print_function
import torch
import torch.nn as nn
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
from pyrender import PerspectiveCamera, \
DirectionalLight, SpotLight, PointLight, \
MetallicRoughnessMaterial, \
Primitive, Mesh, Node, Scene, \
Viewer, OffscreenRenderer
def quat2mat(quat):
"""Convert quaternion coefficients to rotation matrix.
Args:
quat: size = [B, 4], components ordered as (w, x, y, z)
Returns:
Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
"""
norm_quat = quat
norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)
w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w*x, w*y, w*z
xy, xz, yz = x*y, x*z, y*z
rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,
2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,
2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)
return rotMat
def rodrigues(theta):
"""Convert axis-angle representation to rotation matrix.
Args:
theta: size = [B, 3]
Returns:
Rotation matrix corresponding to the axis-angle input -- size = [B, 3, 3]
"""
l1norm = torch.norm(theta + 1e-8, p = 2, dim = 1)
angle = torch.unsqueeze(l1norm, -1)
normalized = torch.div(theta, angle)
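# Half-angle: a unit quaternion encodes a rotation by theta as (cos(theta/2), sin(theta/2) * axis).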
angle = angle * 0.5
v_cos = torch.cos(angle)
v_sin = torch.sin(angle)
quat = torch.cat([v_cos, v_sin * normalized], dim = 1)
return quat2mat(quat)
class TetraSMPL(nn.Module):
"""
Implementation of tetrahedral SMPL model
Modified from https://github.com/nkolot/GraphCMR/blob/master/models/smpl.py
"""
def __init__(self, model_file, model_additional_file):
super(TetraSMPL, self).__init__()
with open(model_file, 'rb') as f:
smpl_model = pickle.load(f, encoding='iso-8859-1')
smpl_model_addition = np.load(model_additional_file) # addition parameters for tetrahedrons
smpl_model.update(smpl_model_addition)
self.orig_vert_num = smpl_model['v_template'].shape[0]
self.added_vert_num = smpl_model['v_template_added'].shape[0]
self.total_vert_num = self.orig_vert_num + self.added_vert_num
J_regressor = smpl_model['J_regressor'].tocoo()
row = J_regressor.row
col = J_regressor.col
data = J_regressor.data
i = torch.LongTensor([row, col])
v = torch.FloatTensor(data)
J_regressor_shape = [24, self.orig_vert_num + self.added_vert_num]
smpl_model['weights'] = np.concatenate([smpl_model['weights'], smpl_model['weights_added']], axis=0)
smpl_model['posedirs'] = np.concatenate([smpl_model['posedirs'], smpl_model['posedirs_added']], axis=0)
smpl_model['shapedirs'] = np.concatenate([smpl_model['shapedirs'], smpl_model['shapedirs_added']], axis=0)
smpl_model['v_template'] = np.concatenate([smpl_model['v_template'], smpl_model['v_template_added']], axis=0)
self.register_buffer('J_regressor', torch.sparse.FloatTensor(i, v, J_regressor_shape).to_dense())
self.register_buffer('weights', torch.FloatTensor(smpl_model['weights']))
self.register_buffer('posedirs', torch.FloatTensor(smpl_model['posedirs']))
self.register_buffer('v_template', torch.FloatTensor(smpl_model['v_template']))
self.register_buffer('shapedirs', torch.FloatTensor(np.array(smpl_model['shapedirs'])))
self.register_buffer('faces', torch.from_numpy(smpl_model['f'].astype(np.int64)))
self.register_buffer('tetrahedrons', torch.from_numpy(smpl_model['tetrahedrons'].astype(np.int64)))
self.register_buffer('kintree_table', torch.from_numpy(smpl_model['kintree_table'].astype(np.int64)))
id_to_col = {self.kintree_table[1, i].item(): i for i in range(self.kintree_table.shape[1])}
self.register_buffer('parent', torch.LongTensor([id_to_col[self.kintree_table[0, it].item()] for it in range(1, self.kintree_table.shape[1])]))
self.pose_shape = [24, 3]
self.beta_shape = [10]
self.translation_shape = [3]
self.pose = torch.zeros(self.pose_shape)
self.beta = torch.zeros(self.beta_shape)
self.translation = torch.zeros(self.translation_shape)
self.verts = None
self.J = None
self.R = None
def forward(self, pose, beta):
device = pose.device
batch_size = pose.shape[0]
v_template = self.v_template[None, :]
shapedirs = self.shapedirs.view(-1,10)[None, :].expand(batch_size, -1, -1)
beta = beta[:, :, None]
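# Shape blendshapes: offset the template vertices by a linear combination of the shape directions.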
v_shaped = torch.matmul(shapedirs, beta).view(batch_size, -1, 3) + v_template
# batched sparse matmul not supported in pytorch
J = []
for i in range(batch_size):
J.append(torch.matmul(self.J_regressor, v_shaped[i]))
J = torch.stack(J, dim=0)
# input it rotmat: (bs,24,3,3)
if pose.ndimension() == 4:
R = pose
# input it rotmat: (bs,72)
elif pose.ndimension() == 2:
pose_cube = pose.view(-1, 3) # (batch_size * 24, 1, 3)
R = rodrigues(pose_cube).view(batch_size, 24, 3, 3)
R = R.view(batch_size, 24, 3, 3)
I_cube = torch.eye(3)[None, None, :].to(device)
# I_cube = torch.eye(3)[None, None, :].expand(theta.shape[0], R.shape[1]-1, -1, -1)
lrotmin = (R[:,1:,:] - I_cube).view(batch_size, -1)
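# Pose blendshapes: corrective offsets driven by the (rotation - identity) features of the 23 non-root joints.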
posedirs = self.posedirs.view(-1,207)[None, :].expand(batch_size, -1, -1)
v_posed = v_shaped + torch.matmul(posedirs, lrotmin[:, :, None]).view(batch_size, -1, 3)
J_ = J.clone()
J_[:, 1:, :] = J[:, 1:, :] - J[:, self.parent, :]
G_ = torch.cat([R, J_[:, :, :, None]], dim=-1)
pad_row = torch.FloatTensor([0,0,0,1]).to(device).view(1,1,1,4).expand(batch_size, 24, -1, -1)
G_ = torch.cat([G_, pad_row], dim=2)
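# Compose the per-joint 4x4 transforms along the kinematic chain, root to leaves.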
G = [G_[:, 0].clone()]
for i in range(1, 24):
G.append(torch.matmul(G[self.parent[i-1]], G_[:, i, :, :]))
G = torch.stack(G, dim=1)
rest = torch.cat([J, torch.zeros(batch_size, 24, 1).to(device)], dim=2).view(batch_size, 24, 4, 1)
zeros = torch.zeros(batch_size, 24, 4, 3).to(device)
rest = torch.cat([zeros, rest], dim=-1)
rest = torch.matmul(G, rest)
G = G - rest
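# Linear blend skinning: each vertex transform is a skinning-weight blend of the joint transforms.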
T = torch.matmul(self.weights, G.permute(1,0,2,3).contiguous().view(24,-1)).view(-1, batch_size, 4, 4).transpose(0,1)
rest_shape_h = torch.cat([v_posed, torch.ones_like(v_posed)[:, :, [0]]], dim=-1)
v = torch.matmul(T, rest_shape_h[:, :, :, None])[:, :, :3, 0]
return v
def get_vertex_transformation(self, pose, beta):
device = pose.device
batch_size = pose.shape[0]
v_template = self.v_template[None, :]
shapedirs = self.shapedirs.view(-1, 10)[None, :].expand(batch_size, -1, -1)
beta = beta[:, :, None]
v_shaped = torch.matmul(shapedirs, beta).view(batch_size, -1, 3) + v_template
# batched sparse matmul not supported in pytorch
J = []
for i in range(batch_size):
J.append(torch.matmul(self.J_regressor, v_shaped[i]))
J = torch.stack(J, dim=0)
# input it rotmat: (bs,24,3,3)
if pose.ndimension() == 4:
R = pose
# input it rotmat: (bs,72)
elif pose.ndimension() == 2:
pose_cube = pose.view(-1, 3) # (batch_size * 24, 1, 3)
R = rodrigues(pose_cube).view(batch_size, 24, 3, 3)
R = R.view(batch_size, 24, 3, 3)
I_cube = torch.eye(3)[None, None, :].to(device)
# I_cube = torch.eye(3)[None, None, :].expand(theta.shape[0], R.shape[1]-1, -1, -1)
lrotmin = (R[:, 1:, :] - I_cube).view(batch_size, -1)
posedirs = self.posedirs.view(-1, 207)[None, :].expand(batch_size, -1, -1)
v_posed = v_shaped + torch.matmul(posedirs, lrotmin[:, :, None]).view(batch_size, -1, 3)
J_ = J.clone()
J_[:, 1:, :] = J[:, 1:, :] - J[:, self.parent, :]
G_ = torch.cat([R, J_[:, :, :, None]], dim=-1)
pad_row = torch.FloatTensor([0, 0, 0, 1]).to(device).view(1, 1, 1, 4).expand(batch_size, 24,
-1, -1)
G_ = torch.cat([G_, pad_row], dim=2)
G = [G_[:, 0].clone()]
for i in range(1, 24):
G.append(torch.matmul(G[self.parent[i - 1]], G_[:, i, :, :]))
G = torch.stack(G, dim=1)
rest = torch.cat([J, torch.zeros(batch_size, 24, 1).to(device)], dim=2).view(batch_size, 24,
4, 1)
zeros = torch.zeros(batch_size, 24, 4, 3).to(device)
rest = torch.cat([zeros, rest], dim=-1)
rest = torch.matmul(G, rest)
G = G - rest
T = torch.matmul(
self.weights, G.permute(1, 0, 2, 3).contiguous().view(24, -1)).view(
-1, batch_size, 4, 4).transpose(0, 1)
return T
def get_smpl_joints(self, vertices):
"""
This method is used to get the joint locations from the SMPL mesh
Input:
vertices: size = (B, V, 3), where V includes the added tetrahedron vertices
Output:
3D joints: size = (B, 24, 3)
"""
joints = torch.einsum('bik,ji->bjk', [vertices, self.J_regressor])
return joints
def get_root(self, vertices):
"""
This method is used to get the root locations from the SMPL mesh
Input:
vertices: size = (B, V, 3), where V includes the added tetrahedron vertices
Output:
3D joints: size = (B, 1, 3)
"""
joints = torch.einsum('bik,ji->bjk', [vertices, self.J_regressor])
return joints[:, 0:1, :]
if __name__ == '__main__':
smpl = TetraSMPL('../data/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl', '../data/tetra_smpl.npz')
pose = np.random.randn(1, 72) * 0.2
shape = np.random.randn(1, 10) * 0.3
pose = torch.from_numpy(pose).float()
shape = torch.from_numpy(shape).float()
vs = smpl.forward(pose, shape)
vs = vs.detach().cpu().numpy()[0]
ts = smpl.tetrahedrons.cpu().numpy()
with open('test.obj', 'w') as fp:
for v in vs:
fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))
for t in ts:  # OBJ faces are 1-indexed; the +1 below does the conversion once
fp.write('f %d %d %d\n' % (t[0] + 1, t[2] + 1, t[1] + 1))
fp.write('f %d %d %d\n' % (t[0] + 1, t[3] + 1, t[2] + 1))
fp.write('f %d %d %d\n' % (t[0] + 1, t[1] + 1, t[3] + 1))
fp.write('f %d %d %d\n' % (t[1] + 1, t[2] + 1, t[3] + 1))
|
ZhengZerong/PaMIR
|
networks/neural_voxelization_layer/smpl_model.py
|
smpl_model.py
|
py
| 10,840 |
python
|
en
|
code
| 179 |
github-code
|
6
|
[
{
"api_name": "torch.stack",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.norm",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.unsqueeze",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.div",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.cos",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.sin",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.sparse.FloatTensor",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.sparse",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "torch.LongTensor",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.eye",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "torch.ones_like",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "torch.eye",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "torch.einsum",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "torch.einsum",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randn",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 231,
"usage_type": "call"
}
] |
27679628120
|
#!/usr/bin/env python
# https://github.com/drf5n/drf5n-public/blob/master/gdalwarp2match.py
from osgeo import gdal, gdalconst
import argparse
parser = argparse.ArgumentParser(description='Use GDAL to reproject a raster to match the extents and res of a template')
parser.add_argument("source", help="Source file")
parser.add_argument("template", help = "template with extents and resolution to match")
parser.add_argument("destination", help = "destination file (geoTIFF)")
args = parser.parse_args()
print(args)
# Source
src_filename = args.source
src = gdal.Open(src_filename, gdalconst.GA_ReadOnly)
src_proj = src.GetProjection()
src_geotrans = src.GetGeoTransform()
# We want a section of source that matches this:
match_filename = args.template
match_ds = gdal.Open(match_filename, gdalconst.GA_ReadOnly)
match_proj = match_ds.GetProjection()
match_geotrans = match_ds.GetGeoTransform()
wide = match_ds.RasterXSize
high = match_ds.RasterYSize
# Output / destination
dst_filename = args.destination
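# Create a single-band Float32 GeoTIFF sized and georeferenced to match the template grid.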
dst = gdal.GetDriverByName('GTiff').Create(dst_filename, wide, high, 1, gdalconst.GDT_Float32)
dst.SetGeoTransform( match_geotrans )
dst.SetProjection( match_proj)
# Do the work
# Resampling choices:
#   GRA_Bilinear          bilinear; can give diamond-shaped artifacts
#   GRA_CubicSpline       cubic spline; smooths those artifacts out
#   GRA_NearestNeighbour  nearest neighbour; preserves the original cell values
interpolation=gdalconst.GRA_NearestNeighbour
#gdal.ReprojectImage(src, dst, src_proj, match_proj, gdalconst.GRA_Bilinear)
#gdal.ReprojectImage(src, dst, src_proj, match_proj, gdalconst.GRA_CubicSpline)
gdal.ReprojectImage(src, dst, src_proj, match_proj, interpolation)
del dst # Flush
|
comet-licsar/licsar_proc
|
python/gdalwarp2match.py
|
gdalwarp2match.py
|
py
| 1,620 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal.Open",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "osgeo.gdalconst.GA_ReadOnly",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "osgeo.gdalconst",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "osgeo.gdal.Open",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "osgeo.gdalconst.GA_ReadOnly",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "osgeo.gdalconst",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "osgeo.gdal.GetDriverByName",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "osgeo.gdalconst.GDT_Float32",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "osgeo.gdalconst",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "osgeo.gdalconst.GRA_NearestNeighbour",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "osgeo.gdalconst",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "osgeo.gdal.ReprojectImage",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 44,
"usage_type": "name"
}
] |
12294385591
|
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import jieba
import pandas as pd
import os
import jieba.analyse
positiondata= pd.read_excel('positiondata.xlsx')
position_detail = positiondata['position_detail']
detail_list = []
for detail in position_detail:
detail_list.append(str(detail).replace('\n', '').replace('\xa0', ''))  # walk the position_detail column and strip newlines and non-breaking spaces
jieba.analyse.set_stop_words('delete_word.txt')  # needs the jieba.analyse import above; the stop-word file must be saved as UTF-8 or a decoding error occurs
jieba.load_userdict('dict_world.txt')
keywords = jieba.analyse.extract_tags(str(detail_list), topK=100, withWeight=True, allowPOS=())
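# extract_tags(withWeight=True) returns (word, weight) pairs; dict(keywords) below feeds them to the cloud as frequencies.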
wc = WordCloud(background_color="white", max_words=200,
max_font_size=200, width=800, height=600,
font_path=r'C:\Windows\Fonts\SimHei.ttf')  # a bare font name also works; give the full path if the font cannot be located
# max_words caps how many words the cloud shows; max_font_size caps the largest font size
wc.generate_from_frequencies(dict(keywords))
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.show()
|
woinews/lagou_position
|
Position_WordCloud.py
|
Position_WordCloud.py
|
py
| 1,136 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "pandas.read_excel",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "jieba.analyse.set_stop_words",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "jieba.analyse",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "jieba.load_userdict",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "jieba.analyse.extract_tags",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "jieba.analyse",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
}
] |
26277293350
|
import functools
import logging
import os
import Bio.Seq
import Bio.SeqRecord
import six
from . import conf, errors
logger = logging.getLogger(__name__)
configs = conf.CONFIGS
ELASPIC_LOGO = """
8888888888 888 d8888 .d8888b. 8888888b. 8888888 .d8888b.
888 888 d88888 d88P Y88b 888 Y88b 888 d88P Y88b
888 888 d88P888 Y88b. 888 888 888 888 888
8888888 888 d88P 888 "Y888b. 888 d88P 888 888
888 888 d88P 888 "Y88b. 8888888P" 888 888
888 888 d88P 888 "888 888 888 888 888
888 888 d8888888888 Y88b d88P 888 888 Y88b d88P
8888888888 88888888 d88P 888 "Y8888P" 888 8888888 "Y8888P"
"""
class Pipeline:
_valid_run_types = {
# Sequence
"1": "sequence",
"sequence": "sequence",
# Model
"2": "model",
"model": "model",
# Mutation
"3": "mutation",
"mutation": "mutation",
# Sequence - model
"6": "sequence.model",
"sequence,model": "sequence.model",
"sequence.model": "sequence.model",
# Model - mutation
"4": "model.mutation",
"model,mutation": "model.mutation",
"model.mutation": "model.mutation",
# Sequence - model - mutation
"5": "sequence.model.mutation",
"sequence,model,mutation": "sequence.model.mutation",
"sequence.model.mutation": "sequence.model.mutation",
# All
"all": "sequence.model.mutation",
}
def __init__(self, configurations):
""".
It should be possible to initialize one pipeline and call it in parallel using different
mutations as input.
"""
# Read the configuration file and set the variables
if isinstance(configurations, six.string_types):
conf.read_configuration_file(configurations)
elif isinstance(configurations, dict):
configs.update(**configurations)
# Initialize a logger
for line in ELASPIC_LOGO.split("\n"):
logger.info(line)
self.PWD = os.getcwd()
# Each one leads to the next...
self.seqrecords = []
self.sequences = {}
self.models = {}
self.predictions = {}
def run(self, *args, **kwargs):
raise NotImplementedError
@staticmethod
def _split_mutations(mutations):
if mutations is None:
return []
elif not isinstance(mutations, str):
return mutations
for sep in [",", ":"]:
if sep in mutations:
return mutations.split(sep)
return [mutations]
@classmethod
def _validate_run_type(cls, run_type):
try:
return cls._valid_run_types[run_type]
except KeyError:
raise errors.ParameterError("Wrong run_type: '{}'".format(run_type))
# Make Bio objects hashable (hack!)
# TODO: Think more closely about which fields should be used to construct the hash.
Bio.Seq.Seq.__hash__ = lambda self: hash(self.__repr__())
Bio.SeqRecord.SeqRecord.__hash__ = lambda self: hash(self.__repr__())
Bio.SeqRecord.SeqRecord.__eq__ = lambda self, other: self.__hash__() == other.__hash__()
def execute_and_remember(f, _instances={}):
"""A basic memoizer.
.. warning::
Does not consider ``kwargs``!!!
"""
@functools.wraps(f)
def f_new(*args, **kwargs):
key = tuple([f, *args])
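# The cache key deliberately ignores kwargs (see the warning in the docstring).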
if key in _instances:
return _instances[key].result
else:
instance = f(*args, **kwargs)
if instance:
with instance:
instance.run()
_instances[key] = instance
return _instances[key].result
return f_new
|
elaspic/elaspic
|
elaspic/pipeline.py
|
pipeline.py
|
py
| 3,833 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "six.string_types",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "Bio.Seq.Seq",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "Bio.Seq",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "Bio.Seq.SeqRecord",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "Bio.Seq",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "Bio.Seq.SeqRecord",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "Bio.Seq",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 117,
"usage_type": "call"
}
] |
70954029949
|
# OO style
import matplotlib.pyplot as plt
data_uas = [['Bejo', 70],
['Tejo', 83],
['Cecep', 62],
['Wati', 74],
['Karti', 71]
]
fig, ax = plt.subplots()
table = plt.table(cellText = data_uas, loc = 'center')
table.set_fontsize(14)
table.scale(.5, 2) # column width, row height
ax.axis(False)
plt.title('OO Style')
plt.show()
|
yusrilarzaqi/Matplotlib-Indonesia-belajar
|
07--Table Plot/1.py
|
1.py
|
py
| 383 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.table",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
}
] |
15285571367
|
from os.path import isfile
import pandas as pd
import logging
from io import BytesIO
from django.http import HttpResponse, JsonResponse, HttpResponseNotFound, Http404
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import generic, View
from django.db import transaction, IntegrityError
from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.conf import settings
from .forms import BasicUploadForm, SearchResult
from .models import RawFile, Result, Pipeline
from project.models import Project
from lrg_omics.common import today
from lrg_omics.proteomics.rawtools.plotly import histograms, lines_plot
from lrg_omics.plotly_tools import (
plotly_heatmap,
plotly_fig_to_div,
plotly_dendrogram,
plotly_bar,
plotly_table,
plotly_histogram,
)
# Create your views here.
def maxquant_pipeline_view(request, project, pipeline):
# Pattern to store form data in session
# to make pagination work with search form
if not request.method == "POST":
if "search-files" in request.session:
request.POST = request.session["search-files"]
request.method = "POST"
else:
form = SearchResult(request.POST)
maxquant_runs = Result.objects.filter(raw_file__pipeline__slug=pipeline)
if request.method == "POST":
request.session["search-files"] = request.POST
form = SearchResult(request.POST)
if form.is_valid():
maxquant_runs = Result.objects.filter(
raw_file__pipeline__slug=pipeline,
raw_file__orig_file__iregex=form.cleaned_data["raw_file"],
)
page = request.GET.get("page", 1)
paginator = Paginator(maxquant_runs, settings.PAGINATE)
try:
maxquant_runs = paginator.page(page)
except PageNotAnInteger:
maxquant_runs = paginator.page(1)
except EmptyPage:
maxquant_runs = paginator.page(paginator.num_pages)
project = Project.objects.get(slug=project)
pipeline = Pipeline.objects.get(project=project, slug=pipeline)
context = dict(maxquant_runs=maxquant_runs, project=project, pipeline=pipeline)
context["home_title"] = settings.HOME_TITLE
context["form"] = form
return render(request, "proteomics/pipeline_detail.html", context)
def pipeline_download_file(request, pk):
pipeline_pk = pk
maxquant_runs = Result.objects.filter(
raw_file__pipeline__pk=pipeline_pk, raw_file__use_downstream=True
)
fn = request.GET.get("file")
pipeline = Pipeline.objects.get(pk=pipeline_pk)
project = pipeline.project
project_name = project.name
stream = BytesIO()
dfs = []
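# Merge each run's copy of the requested table into one CSV for download.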
for mq_run in maxquant_runs:
df = mq_run.get_data_from_file(fn)
if df is None:
continue
dfs.append(df)
if dfs == []:
raise Http404(f"No file named {fn} found on the server.")
    data = pd.concat(dfs).set_index("RawFile").reset_index()  # moves RawFile to the first column
stream.write(data.to_csv(index=False).encode())
stream.seek(0)
response = HttpResponse(stream, content_type="text/csv")
fn = f"{today()}_{project}_{pipeline}__{fn.replace('.txt', '')}.csv"
response["Content-Disposition"] = 'attachment; filename="{}"'.format(fn)
return response
class ResultDetailView(LoginRequiredMixin, generic.DetailView):
model = Result
template_name = "proteomics/maxquant_detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
mq_run = context["object"]
path = mq_run.output_dir_maxquant
path_rt = mq_run.output_dir_rawtools
context["name"] = context["object"].name
context["project"] = mq_run.raw_file.pipeline.project
context["pipeline"] = mq_run.raw_file.pipeline
raw_fn = mq_run.raw_file
context["raw_file"] = raw_fn
figures = []
fn = f"{path_rt}/{raw_fn}_Ms_TIC_chromatogram.txt"
if isfile(fn):
df = (
pd.read_csv(fn, sep="\t")
.rename(columns={"RetentionTime": "Retention time"})
.set_index("Retention time")
)
fig = lines_plot(df, cols=["Intensity"], title="Ms TIC chromatogram")
figures.append(plotly_fig_to_div(fig))
fig = histograms(df, cols=["Intensity"], title="Ms TIC histogram")
figures.append(plotly_fig_to_div(fig))
fn = f"{path_rt}/{raw_fn}_Ms2_TIC_chromatogram.txt"
if isfile(fn):
df = (
pd.read_csv(fn, sep="\t")
.rename(columns={"RetentionTime": "Retention time"})
.set_index("Retention time")
)
fig = lines_plot(df, cols=["Intensity"], title="Ms2 TIC chromatogram")
figures.append(plotly_fig_to_div(fig))
fig = histograms(df, cols=["Intensity"], title="Ms2 TIC histogram")
figures.append(plotly_fig_to_div(fig))
fn = f"{path}/evidence.txt"
if isfile(fn):
msms = pd.read_csv(fn, sep="\t").set_index("Retention time").sort_index()
cols = [
"Length",
#'Oxidation (M)', 'Missed cleavages', 'MS/MS m/z',
"Charge",
"m/z",
"Mass",
]
for col in cols:
fig = lines_plot(msms, cols=[col], title=f"Evidence: {col}")
figures.append(plotly_fig_to_div(fig))
fig = histograms(msms, cols=[col], title=f"Evidence: {col} (histogram)")
figures.append(plotly_fig_to_div(fig))
fn = f"{path}/msmsScans.txt"
if isfile(fn):
msms = pd.read_csv(fn, sep="\t").set_index("Retention time")
cols = [
#'Total ion current',
#'m/z', 'Base peak intensity'
]
for col in cols:
fig = lines_plot(msms, cols=[col], title=f"MSMS: {col}")
figures.append(plotly_fig_to_div(fig))
fn = f"{path}/peptides.txt"
if isfile(fn):
peptides = pd.read_csv(fn, sep="\t")
cols = ["Length", "Mass"]
for col in cols:
# fig = lines_plot(peptides, cols=[col], title=f'Peptide: {col}')
# figures.append( plotly_fig_to_div(fig) )
fig = histograms(
peptides, cols=[col], title=f"Peptide: {col} (histogram)"
)
figures.append(plotly_fig_to_div(fig))
fn = f"{path}/proteinGroups.txt"
if isfile(fn):
proteins = pd.read_csv(fn, sep="\t")
cols = ["Mol. weight [kDa]", "Unique sequence coverage [%]"]
for col in cols:
# fig = lines_plot(proteins, cols=[col], title=f'Protein: {col}')
# figures.append( plotly_fig_to_div(fig) )
fig = histograms(
proteins, cols=[col], title=f"Protein: {col} (histogram)"
)
figures.append(plotly_fig_to_div(fig))
context["figures"] = figures
context["home_title"] = settings.HOME_TITLE
return context
def maxquant_download(request, pk):
mq_run = Result.objects.get(pk=pk)
response = HttpResponse(mq_run.download, content_type="application/zip")
fn = f"{mq_run.name}.zip"
response["Content-Disposition"] = 'attachment; filename="{}"'.format(fn)
return response
class UploadRaw(LoginRequiredMixin, View):
def get(self, request, pk=None):
pipeline = Pipeline.objects.get(pk=pk)
project = pipeline.project
context = {
"project": project,
"home_title": settings.HOME_TITLE,
"pipeline": pipeline,
}
return render(request, "proteomics/upload.html", context)
def post(self, request):
form = BasicUploadForm(self.request.POST, self.request.FILES)
logging.warning("RAW upload")
project_id = request.POST.get("project")
pipeline_id = request.POST.get("pipeline")
logging.warning(f"Upload to: {project_id} / {pipeline_id}")
pipeline = Pipeline.objects.get(pk=pipeline_id)
project = pipeline.project
logging.warning(f"Upload to: {project.name} / {pipeline.name}")
if form.is_valid():
_file = form.cleaned_data["orig_file"]
_file = RawFile.objects.create(orig_file=_file, pipeline=pipeline)
if str(_file.name).lower().endswith(".raw"):
_file.save()
data = {"is_valid": True, "name": str(_file.name), "url": str(_file.path)}
else:
data = {"is_valid": False}
return JsonResponse(data)
|
LewisResearchGroup/ProteomicsQC
|
app/maxquant/views.py
|
views.py
|
py
| 8,789 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "forms.SearchResult",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "models.Result.objects.filter",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "models.Result.objects",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "models.Result",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "forms.SearchResult",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "models.Result.objects.filter",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "models.Result.objects",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "models.Result",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.PAGINATE",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.PageNotAnInteger",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "project.models",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "project.models.Project.objects.get",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "project.models.Project.objects",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "project.models.Project",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "models.Pipeline.objects.get",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "models.Pipeline.objects",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "models.Pipeline",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "project.models",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "project.models",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.HOME_TITLE",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "models.Result.objects.filter",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "models.Result.objects",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "models.Result",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "models.Pipeline.objects.get",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "models.Pipeline.objects",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "models.Pipeline",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "project.models",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "project.models.name",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "project.models",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "django.http.Http404",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "lrg_omics.common.today",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "project.models",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "django.views.generic.DetailView",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "models.Result",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "lrg_omics.proteomics.rawtools.plotly.lines_plot",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "lrg_omics.plotly_tools.plotly_fig_to_div",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "lrg_omics.proteomics.rawtools.plotly.histograms",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "lrg_omics.plotly_tools.plotly_fig_to_div",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "lrg_omics.proteomics.rawtools.plotly.lines_plot",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "lrg_omics.plotly_tools.plotly_fig_to_div",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "lrg_omics.proteomics.rawtools.plotly.histograms",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "lrg_omics.plotly_tools.plotly_fig_to_div",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "lrg_omics.proteomics.rawtools.plotly.lines_plot",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "lrg_omics.plotly_tools.plotly_fig_to_div",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "lrg_omics.proteomics.rawtools.plotly.histograms",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "lrg_omics.plotly_tools.plotly_fig_to_div",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "lrg_omics.proteomics.rawtools.plotly.lines_plot",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "lrg_omics.plotly_tools.plotly_fig_to_div",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "lrg_omics.proteomics.rawtools.plotly.histograms",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "lrg_omics.plotly_tools.plotly_fig_to_div",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "lrg_omics.proteomics.rawtools.plotly.histograms",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "lrg_omics.plotly_tools.plotly_fig_to_div",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.HOME_TITLE",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "models.Result.objects.get",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "models.Result.objects",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "models.Result",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "models.Pipeline.objects.get",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "models.Pipeline.objects",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "models.Pipeline",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "project.models",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "project.models",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.HOME_TITLE",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "forms.BasicUploadForm",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "models.Pipeline.objects.get",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "models.Pipeline.objects",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "models.Pipeline",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "project.models",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "logging.warning",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "project.models.name",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "project.models",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "models.RawFile.objects.create",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "models.RawFile.objects",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "models.RawFile",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 250,
"usage_type": "call"
}
] |
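
The maxquant_pipeline_view above pairs a session-restored search form with Django's Paginator; the paging part is the standard three-way fallback. A condensed, self-contained sketch of just that pattern (the function name and per_page value are illustrative, not from the project):

from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

def paginate(queryset, page, per_page=25):
    paginator = Paginator(queryset, per_page)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)                    # non-numeric page -> first page
    except EmptyPage:
        return paginator.page(paginator.num_pages)  # past the end -> last page
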
44738680071
|
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
from heapq import nlargest
class SpacyStrategy:
def summarize_from_text(self, text):
raw_text = text
stopwords = list(STOP_WORDS)
        nlp = spacy.load('en')  # note: the bare 'en' shortcut only resolves under spaCy v1/v2; v3 needs e.g. 'en_core_web_sm'
docx = nlp(raw_text)
        # Build word frequencies
        # word.text is the token's text (spaCy tokenization)
word_frequencies = {}
for word in docx:
if word.text not in stopwords:
if word.text not in word_frequencies.keys():
word_frequencies[word.text] = 1
else:
word_frequencies[word.text] += 1
maximum_frequency = max(word_frequencies.values())
for word in word_frequencies.keys():
word_frequencies[word] = (word_frequencies[word] / maximum_frequency)
# Sentence Tokens
sentence_list = [sentence for sentence in docx.sents]
# Calculate Sentence Score and Ranking
sentence_scores = {}
for sent in sentence_list:
for word in sent:
if word.text.lower() in word_frequencies.keys():
if len(sent.text.split(' ')) < 30:
if sent not in sentence_scores.keys():
sentence_scores[sent] = word_frequencies[word.text.lower()]
else:
sentence_scores[sent] += word_frequencies[word.text.lower()]
# Find N Largest
summary_sentences = nlargest(7, sentence_scores, key=sentence_scores.get)
final_sentences = [w.text for w in summary_sentences]
summary = ' '.join(final_sentences)
return summary.strip()
|
andredantasrocha/contact-summarizer
|
summarization/spacy_strategy.py
|
spacy_strategy.py
|
py
| 1,691 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "spacy.lang.en.stop_words.STOP_WORDS",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "spacy.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "heapq.nlargest",
"line_number": 43,
"usage_type": "call"
}
] |
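
A hedged usage sketch for the strategy above. As noted in the code, the bare 'en' shortcut only resolves under spaCy v1/v2 model links; under spaCy v3 the loader would need a concrete package name such as 'en_core_web_sm'.

strategy = SpacyStrategy()
long_text = "..."  # any multi-sentence English document (placeholder)
summary = strategy.summarize_from_text(long_text)
print(summary)     # the 7 highest-scoring sentences, joined with spaces
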
6827006318
|
'''
$Id: gvars.py 44 2010-10-11 11:24:33Z [email protected] $
'''
from datetime import datetime, date
from models import Global_Var
from django.conf import settings
from django.core.cache import cache
''' see gsettings.models.Global_Var_Type '''
VAR_TYPE_STRING = 1
VAR_TYPE_INT = 2
VAR_TYPE_FLOAT = 3
VAR_TYPE_DATETIME = 4
VAR_TYPE_DATE = 5
def get_value(name, category_name=None):
''' Returns the value of a variable from the cache, or the database if the cache is empty/expired/disabled.
    Returns None if the variable is not found.
'''
ret = None
vars = __get_vars_from_cache()
if vars is not None:
if name in vars:
ret = vars[name]
if category_name is None:
# pick the first category
for i in ret:
ret = ret[i]
break
else:
if category_name in ret:
ret = ret[category_name]
else:
ret = get_value_db(name, category_name)
return ret
def get_value_db(name, category_name=None):
''' Returns the value of a variable from the database.
Never tries the cache.
Returns None if the variable is not found.'''
ret = None
var = Global_Var.objects.filter(name=name)
if category_name is not None:
var = var.filter(global_var_category__name=category_name)
if var.count():
var = var[0]
ret = __get_cast_var(var)
return ret
def cache_values(force_reload = False):
    raise Exception("cache_values() is deprecated. GSettings caching uses your project's cache settings (see CACHE_BACKEND)")
''' --------------- INTERNAL FUNCTIONS -------------- '''
def __get_vars():
''' Returns a dictionary with all the gsettings variable values.
Variables are read from the cache (if caching is enabled AND cache is not empty AND not expired)
or the database (otherwise).
Repopulate the cache if necessary.
'''
ret = __get_vars_from_cache()
if ret is None: ret = __get_vars_from_db()
return ret
def __get_vars_from_cache():
''' Returns a dictionary with all the gsettings variable values.
Variables are read from the cache. If the cache is expired or empty, read from the DB and fill the cache.
If caching is disabled, returns None.
'''
ret = None
if __is_cache_enabled():
ret = cache.get('gsettings.vars', None)
if ret is None:
# get all the data from the DB to a dictionary
ret = __get_vars_from_db()
timeout = getattr(settings, 'GSETTINGS_CACHE_TIMEOUT', None)
cache.set('gsettings.vars', ret, timeout)
return ret
def __get_vars_from_db():
''' Returns a dictionary with all the gsettings variable values.
    Variables are read from the database. It neither reads from nor writes to the cache.
'''
ret = {}
for var in Global_Var.objects.all():
if var.name not in ret:
ret[var.name] = {}
ret[var.name][var.global_var_category.name] = __get_cast_var(var)
return ret
def __get_cast_var(var):
''' Returns the value of a variable. it is cast into the proper python type. '''
return __get_cast_value(var.value, var.global_var_type.id)
def __get_cast_value(value, typeid):
''' Returns the value of a variable. it is cast into the proper python type. '''
ret = value
try:
if typeid == VAR_TYPE_INT: ret = int(ret)
if typeid == VAR_TYPE_FLOAT: ret = float(ret)
if typeid == VAR_TYPE_DATE:
parts = ret.split('-')
ret = date(int(parts[0]), int(parts[1]), int(parts[2]))
if typeid == VAR_TYPE_DATETIME:
parts = ret.split(' ')
parts_date = parts[0].split('-')
parts_time = parts[1].split(':')
ret = datetime(int(parts_date[0]), int(parts_date[1]), int(parts_date[2]), int(parts_time[0]), int(parts_time[1]), int(parts_time[2]))
except:
raise ValueError('Invalid format.')
return ret
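
# Worked examples of the casting above (hypothetical inputs):
#   __get_cast_value('42', VAR_TYPE_INT)           -> 42
#   __get_cast_value('3.5', VAR_TYPE_FLOAT)        -> 3.5
#   __get_cast_value('2010-10-11', VAR_TYPE_DATE)  -> datetime.date(2010, 10, 11)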
def __get_string_from_value(value):
''' Returns a string from a python value.
The format of this string is compatible with database format for global variables. '''
ret = value
type_name = value.__class__.__name__
    if type_name == 'int':
        ret = str(ret)
    if type_name == 'float':
        ret = str(ret)
    if type_name == 'date':
        ret = str(ret)
    if type_name == 'datetime':
        ret = str(ret)
return ret
def __is_cache_enabled():
''' Returns True if caching is enabled in the project settings. '''
return (getattr(settings, 'CACHE_BACKEND', None) is not None)
#''' --------------- CACHING -------------- '''
#from django.core.signals import request_started
#
#def __on_request_started(sender, **kwargs):
# # Clear the existing cache so if the database has been modified
# # by a previous request we don't return stale values.
## global cache
## cache = None
## print 'cache = None'
## from django.conf import settings
## if getattr(settings, 'GSETTINGS_AUTO_CACHE', False):
## cache_values()
# pass
#
#request_started.connect(__on_request_started)
|
kingsdigitallab/eel
|
django/gsettings/gvars.py
|
gvars.py
|
py
| 5,370 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "models.Global_Var.objects.filter",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "models.Global_Var.objects",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "models.Global_Var",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.core.cache.cache.get",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "django.conf.settings",
"line_number": 77,
"usage_type": "argument"
},
{
"api_name": "django.core.cache.cache.set",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "models.Global_Var.objects.all",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "models.Global_Var.objects",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "models.Global_Var",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "django.conf.settings",
"line_number": 132,
"usage_type": "argument"
}
] |
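
The caching half of the module above is a textbook cache-aside read: try the shared cache, fall back to the database on a miss, then repopulate with the configured timeout. Reduced to its essentials (key and setting names mirror the module; the loader callable is an assumption):

from django.conf import settings
from django.core.cache import cache

def cached_vars(load_from_db):
    vars = cache.get('gsettings.vars')
    if vars is None:
        vars = load_from_db()                              # cache miss: hit the database
        timeout = getattr(settings, 'GSETTINGS_CACHE_TIMEOUT', None)
        cache.set('gsettings.vars', vars, timeout)         # repopulate the cache
    return vars
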
4668550364
|
from django.core.exceptions import PermissionDenied
class UserIsAuthorMixin(object):
"""
Checks that the user is the author of the object. If they are not, raise a
403 error
"""
def dispatch(self, request, *args, **kwargs):
        if request.user.is_authenticated and request.user.profile.pk != self.get_object().author.pk:
raise PermissionDenied
return super(UserIsAuthorMixin, self).dispatch(request, *args, **kwargs)
|
hotwire-django/hotwire-django-realworld
|
articles/mixins.py
|
mixins.py
|
py
| 468 |
python
|
en
|
code
| 31 |
github-code
|
6
|
[
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 11,
"usage_type": "name"
}
] |
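
A hypothetical usage sketch for the mixin above: listed before the generic view so its dispatch() runs first, it turns an edit view into an author-only view. Article and the field list are assumed names, not part of the original file.

from django.views import generic
from .mixins import UserIsAuthorMixin
from .models import Article   # assumed model with an `author` -> Profile FK

class ArticleUpdateView(UserIsAuthorMixin, generic.UpdateView):
    model = Article
    fields = ['title', 'body']
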
29150918130
|
import os
import torch
import torchmetrics
from pathlib import Path
# Huggingface datasets and tokenizers
from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.trainers import WordLevelTrainer
from tokenizers.pre_tokenizers import Whitespace
os.environ["TOKENIZERS_PARALLELISM"] = "true"
def get_device():
device_count = 1
if torch.cuda.is_available():
device = "cuda"
device_count = torch.cuda.device_count()
elif torch.backends.mps.is_available():
device = "mps"
else:
device = "cpu"
print(f"Devices Found: {device_count} x {device}")
return device, device_count
def get_or_build_tokenizer(file_path, ds, lang):
tokenizer_path = Path(file_path)
if not Path.exists(tokenizer_path):
# Most code taken from: https://huggingface.co/docs/tokenizers/quicktour
tokenizer = Tokenizer(WordLevel(unk_token='[UNK]'))
tokenizer.pre_tokenizer = Whitespace()
trainer = WordLevelTrainer(special_tokens=['[UNK]', '[PAD]', '[SOS]', '[EOS]'], min_frequency=4)
tokenizer.train_from_iterator((item['translation'][lang] for item in ds), trainer=trainer)
tokenizer.save(str(tokenizer_path))
else:
tokenizer = Tokenizer.from_file(str(tokenizer_path))
return tokenizer
def causal_mask(seq_len):
# (seq_len, seq_len)
return torch.tril(torch.ones(seq_len, seq_len, dtype=torch.int))
def greedy_decode(model, source, source_mask, device='cpu'):
tokenizer_tgt = model.tgt_tokenizer
sos_idx = tokenizer_tgt.token_to_id('[SOS]')
eos_idx = tokenizer_tgt.token_to_id('[EOS]')
# Precompute the encoder output and reuse it for every step
encoder_output = model.encode(source, source_mask)
# Initialize the decoder input with the sos token
decoder_input = torch.empty(1, 1).fill_(sos_idx).type_as(source).to(device)
while True:
        if decoder_input.shape[1] == 161:  # hard length cap (presumably max target seq_len plus the SOS token)
break
# build mask for target
decoder_mask = causal_mask(decoder_input.shape[1]).type_as(source_mask).to(device)
# calculate output
out = model.decode(encoder_output, source_mask, decoder_input, decoder_mask)
# get next token for input from last token of output
prob = model.project(out[:, -1, :])
_, next_word = torch.max(prob, dim=1)
decoder_input = torch.cat(
[decoder_input, torch.empty(1, 1).type_as(source).fill_(next_word.item()).to(device)], dim=1
)
if next_word == eos_idx:
break
return decoder_input.squeeze(0)
def run_validation(model, val_dataloader, print_msg, writer=None, global_step=0, num_examples=2, device='cpu'):
model.eval()
count = 0
source_texts = []
expected = []
predicted = []
try:
# get the console window width
with os.popen('stty size', 'r') as console:
_, console_width = console.read().split()
console_width = int(console_width)
except:
# If we can't get the console width, use 80 as default
console_width = 80
with torch.no_grad():
for batch in val_dataloader:
count += 1
encoder_input = batch["encoder_input"].to(device) # (B, seq_len)
encoder_mask = batch["encoder_mask"].to(device) # (B, 1, 1, seq_len)
# check that the batch size is 1
assert encoder_input.shape[0] == 1, "Batch size must be 1 for validation"
            model_out = greedy_decode(model, encoder_input, encoder_mask, device=device)
source_text = batch["src_text"][0]
target_text = batch["tgt_text"][0]
model_out_text = model.tgt_tokenizer.decode(model_out.detach().cpu().numpy())
source_texts.append(source_text)
expected.append(target_text)
predicted.append(model_out_text)
# Print the source, target and model output
print_msg('-' * console_width)
print_msg(f"{f'SOURCE: ':>12}{source_text}")
print_msg(f"{f'TARGET: ':>12}{target_text}")
print_msg(f"{f'PREDICTED: ':>12}{model_out_text}")
if count == num_examples:
print_msg('-' * console_width)
break
# Evaluate the character error rate
# Compute the char error rate
metric = torchmetrics.CharErrorRate()
cer = metric(predicted, expected)
print('validation cer', cer)
# Compute the word error rate
metric = torchmetrics.WordErrorRate()
wer = metric(predicted, expected)
print('validation wer', wer)
# Compute the BLEU metric
metric = torchmetrics.BLEUScore()
bleu = metric(predicted, expected)
print('validation BLEU', bleu)
if writer:
writer.add_scalar('validation cer', cer, global_step)
writer.flush()
writer.add_scalar('validation wer', wer, global_step)
writer.flush()
writer.add_scalar('validation BLEU', bleu, global_step)
writer.flush()
|
swapniel99/erav1s15
|
utils.py
|
utils.py
|
py
| 5,016 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.device_count",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.backends.mps.is_available",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.backends",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.exists",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "tokenizers.Tokenizer",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tokenizers.models.WordLevel",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tokenizers.pre_tokenizers.Whitespace",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tokenizers.trainers.WordLevelTrainer",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tokenizers.Tokenizer.from_file",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tokenizers.Tokenizer",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "torch.tril",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.int",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "torch.empty",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.empty",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torchmetrics.CharErrorRate",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torchmetrics.WordErrorRate",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "torchmetrics.BLEUScore",
"line_number": 137,
"usage_type": "call"
}
] |
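
A small demonstration of causal_mask from the utils module above: the lower-triangular matrix lets decoder position i attend only to positions 0 through i.

import torch

mask = torch.tril(torch.ones(4, 4, dtype=torch.int))
print(mask)
# tensor([[1, 0, 0, 0],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0],
#         [1, 1, 1, 1]], dtype=torch.int32)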