# -*- coding: utf-8 -*-
"""
Notifications
-------------
Example showing how to add notifications to a characteristic and handle the responses.
Updated on 2019-07-03 by hbldh <<EMAIL>>
"""
import sys
import logging
import asyncio
import platform
from bleak import BleakClient
from bleak import _logger as logger
CHARACTERISTIC_UUID = "f000aa65-0451-4000-b000-000000000000" # <--- Change to the characteristic you want to enable notifications from.
ADDRESS = (
    "24:71:89:cc:09:05"  # <--- Change to your device's address here if you are using Windows or Linux
    if platform.system() != "Darwin"
    else "B9EA5233-37EF-4DD6-87A8-2A875E821C46"  # <--- Change to your device's address here if you are using macOS
)

if len(sys.argv) == 3:
    ADDRESS = sys.argv[1]
    CHARACTERISTIC_UUID = sys.argv[2]


def notification_handler(sender, data):
    """Simple notification handler which prints the data received."""
    print("{0}: {1}".format(sender, data))


async def run(address, debug=False):
    if debug:
        import sys

        l = logging.getLogger("asyncio")
        l.setLevel(logging.DEBUG)
        h = logging.StreamHandler(sys.stdout)
        h.setLevel(logging.DEBUG)
        l.addHandler(h)
        logger.addHandler(h)

    async with BleakClient(address) as client:
        logger.info(f"Connected: {client.is_connected}")

        await client.start_notify(CHARACTERISTIC_UUID, notification_handler)
        await asyncio.sleep(5.0)
        await client.stop_notify(CHARACTERISTIC_UUID)


if __name__ == "__main__":
    import os

    os.environ["PYTHONASYNCIODEBUG"] = str(1)
    loop = asyncio.get_event_loop()
    # loop.set_debug(True)
    loop.run_until_complete(run(ADDRESS, True))
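# --- Illustrative addition, not part of the original bleak example ---
# notification_handler above only prints the raw bytearray. If the characteristic's
# payload is known to be a little-endian unsigned integer (an assumption made purely
# for illustration), a handler could decode it before printing:
def decoding_notification_handler(sender, data):
    """Hypothetical handler that decodes the payload before printing it."""
    value = int.from_bytes(data, byteorder="little")  # assumed payload layout
    print("{0}: raw={1} value={2}".format(sender, data.hex(), value))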
| [
"logging.getLogger",
"logging.StreamHandler",
"asyncio.sleep",
"platform.system",
"bleak._logger.info",
"bleak.BleakClient",
"asyncio.get_event_loop",
"bleak._logger.addHandler"
]
from django.shortcuts import render,redirect
from .forms import usernameForm,DateForm,UsernameAndDateForm, DateForm_2
from django.contrib import messages
from django.contrib.auth.models import User
import cv2
import dlib
import imutils
from imutils import face_utils
from imutils.video import VideoStream
from imutils.face_utils import rect_to_bb
from imutils.face_utils import FaceAligner
import time
from attendance_system_facial_recognition.settings import BASE_DIR
import os
import face_recognition
from face_recognition.face_recognition_cli import image_files_in_folder
import pickle
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import numpy as np
from django.contrib.auth.decorators import login_required
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import datetime
from django_pandas.io import read_frame
from users.models import Present, Time
import seaborn as sns
import pandas as pd
from django.db.models import Count
#import mpld3
from pandas.plotting import register_matplotlib_converters
from matplotlib import rcParams
import math
mpl.use('Agg')
#utility functions:
def username_present(username):
    if User.objects.filter(username=username).exists():
        return True
    return False
def create_dataset(username):
    id = username
    if(os.path.exists('face_recognition_data/training_dataset/{}/'.format(id)) == False):
        os.makedirs('face_recognition_data/training_dataset/{}/'.format(id))
    directory = 'face_recognition_data/training_dataset/{}/'.format(id)

    # Detect face
    # Loading the HOG face detector and the shape predictor for alignment
    print("[INFO] Loading the facial detector")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat')  # Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER
    fa = FaceAligner(predictor, desiredFaceWidth=96)

    # Capture images from the webcam, then process and detect the face
    # Initialize the video stream
    print("[INFO] Initializing Video stream")
    vs = VideoStream(src=0).start()
    # time.sleep(2.0) ####CHECK######

    # The username acts as the identifier: it is stored with every face so that we can
    # later tell whose face it is.
    # Our dataset naming counter
    sampleNum = 0
    # Capture the faces one by one, detect them and show them in the window
    while(True):
        # Capture the image: vs.read each frame
        frame = vs.read()
        # Resize each image
        frame = imutils.resize(frame, width=800)
        # The returned img is a colored image but the classifier needs a greyscale image
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect all the faces in the current frame and return their coordinates
        faces = detector(gray_frame, 0)
        # 'faces' can contain multiple faces, so draw a rectangle around each one
        for face in faces:
            print("inside for loop")
            (x, y, w, h) = face_utils.rect_to_bb(face)
            face_aligned = fa.align(frame, gray_frame, face)
            # Whenever the program captures a face, it is written into the user's folder
            sampleNum = sampleNum + 1
            # Save only the face part of the image, cropping the rest
            if face is None:
                print("face is none")
                continue
            cv2.imwrite(directory + '/' + str(sampleNum) + '.jpg', face_aligned)
            face_aligned = imutils.resize(face_aligned, width=400)
            # cv2.imshow("Image Captured",face_aligned)
            # @params the initial point of the rectangle will be x,y and
            # @params end point will be x+width and y+height
            # @params along with color of the rectangle
            # @params thickness of the rectangle
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
            # Give it a little pause before continuing to the next loop
            cv2.waitKey(50)
        # Show the frame in a window named "Add Images"
        cv2.imshow("Add Images", frame)
        # A wait command is needed, otherwise OpenCV won't refresh the window
        cv2.waitKey(1)
        # To get out of the loop
        if(sampleNum > 300):
            break

    # Stopping the videostream
    vs.stop()
    # Destroying all the windows
    cv2.destroyAllWindows()
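# Illustration (not part of the original code): for a hypothetical username "alice",
# create_dataset("alice") leaves roughly 300 aligned face crops on disk, e.g.
#   face_recognition_data/training_dataset/alice/1.jpg, 2.jpg, ...
# which train() below consumes to build the classifier.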
def predict(face_aligned, svc, threshold=0.7):
    face_encodings = np.zeros((1, 128))
    try:
        x_face_locations = face_recognition.face_locations(face_aligned)
        faces_encodings = face_recognition.face_encodings(face_aligned, known_face_locations=x_face_locations)
        if(len(faces_encodings) == 0):
            return ([-1], [0])
    except:
        return ([-1], [0])

    prob = svc.predict_proba(faces_encodings)
    result = np.where(prob[0] == np.amax(prob[0]))
    if(prob[0][result[0]] <= threshold):
        return ([-1], prob[0][result[0]])
    return (result[0], prob[0][result[0]])
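# Illustrative helper (not in the original file): a condensed sketch of how predict()
# is used, mirroring the classifier loading done in mark_your_attendance() below.
def _predict_example(face_aligned):
    """Hypothetical usage sketch: load the persisted classifier and label one crop."""
    with open('face_recognition_data/svc.sav', 'rb') as f:
        svc = pickle.load(f)
    encoder = LabelEncoder()
    encoder.classes_ = np.load('face_recognition_data/classes.npy')
    (pred, prob) = predict(face_aligned, svc)
    if pred != [-1]:
        return encoder.inverse_transform(np.ravel([pred]))[0], prob
    return "unknown", prob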
def vizualize_Data(embedded, targets):
    X_embedded = TSNE(n_components=2).fit_transform(embedded)

    for i, t in enumerate(set(targets)):
        idx = targets == t
        plt.scatter(X_embedded[idx, 0], X_embedded[idx, 1], label=t)

    plt.legend(bbox_to_anchor=(1, 1))
    rcParams.update({'figure.autolayout': True})
    plt.tight_layout()
    plt.savefig('./recognition/static/recognition/img/training_visualisation.png')
    plt.close()
def update_attendance_in_db_in(present):
    today = datetime.date.today()
    time = datetime.datetime.now()
    for person in present:
        user = User.objects.get(username=person)
        try:
            qs = Present.objects.get(user=user, date=today)
        except:
            qs = None
        if qs is None:
            if present[person] == True:
                a = Present(user=user, date=today, present=True)
                a.save()
            else:
                a = Present(user=user, date=today, present=False)
                a.save()
        else:
            if present[person] == True:
                qs.present = True
                qs.save(update_fields=['present'])
        if present[person] == True:
            a = Time(user=user, date=today, time=time, out=False)
            a.save()
def update_attendance_in_db_out(present):
    today = datetime.date.today()
    time = datetime.datetime.now()
    for person in present:
        user = User.objects.get(username=person)
        if present[person] == True:
            a = Time(user=user, date=today, time=time, out=True)
            a.save()
def check_validity_times(times_all):
    if(len(times_all) > 0):
        sign = times_all.first().out
    else:
        sign = True
    times_in = times_all.filter(out=False)
    times_out = times_all.filter(out=True)
    if(len(times_in) != len(times_out)):
        sign = True
    break_hourss = 0
    if(sign == True):
        check = False
        break_hourss = 0
        return (check, break_hourss)
    prev = True
    prev_time = times_all.first().time
    for obj in times_all:
        curr = obj.out
        if(curr == prev):
            check = False
            break_hourss = 0
            return (check, break_hourss)
        if(curr == False):
            curr_time = obj.time
            to = curr_time
            ti = prev_time
            break_time = ((to - ti).total_seconds()) / 3600
            break_hourss += break_time
        else:
            prev_time = obj.time
        prev = curr
    return (True, break_hourss)
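# Worked example for check_validity_times() (hypothetical data, not in the original):
# for a day whose Time rows, ordered by time, are
#   in 09:00, out 13:00, in 14:00, out 18:00
# the in/out rows alternate and are equal in number, so the function returns
# (True, 1.0) -- the single counted break is the 13:00 -> 14:00 gap.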
def convert_hours_to_hours_mins(hours):
    h = int(hours)
    hours -= h
    m = hours * 60
    m = math.ceil(m)
    return str(str(h) + " hrs " + str(m) + " mins")
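# Example with an illustrative value: convert_hours_to_hours_mins(1.75) -> "1 hrs 45 mins"
# (0.75 h * 60 = 45; math.ceil rounds any fractional minute up).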
# used
def hours_vs_date_given_employee(present_qs, time_qs, admin=True):
    register_matplotlib_converters()
    df_hours = []
    df_break_hours = []
    qs = present_qs

    for obj in qs:
        date = obj.date
        times_in = time_qs.filter(date=date).filter(out=False).order_by('time')
        times_out = time_qs.filter(date=date).filter(out=True).order_by('time')
        times_all = time_qs.filter(date=date).order_by('time')
        obj.time_in = None
        obj.time_out = None
        obj.hours = 0
        obj.break_hours = 0
        if (len(times_in) > 0):
            obj.time_in = times_in.first().time
        if (len(times_out) > 0):
            obj.time_out = times_out.last().time
        if(obj.time_in is not None and obj.time_out is not None):
            ti = obj.time_in
            to = obj.time_out
            hours = ((to - ti).total_seconds()) / 3600
            obj.hours = hours
        else:
            obj.hours = 0
        (check, break_hourss) = check_validity_times(times_all)
        if check:
            obj.break_hours = break_hourss
        else:
            obj.break_hours = 0
        df_hours.append(obj.hours)
        df_break_hours.append(obj.break_hours)
        obj.hours = convert_hours_to_hours_mins(obj.hours)
        obj.break_hours = convert_hours_to_hours_mins(obj.break_hours)

    df = read_frame(qs)
    df["hours"] = df_hours
    df["break_hours"] = df_break_hours
    print(df)

    sns.barplot(data=df, x='date', y='hours')
    plt.xticks(rotation='vertical')
    rcParams.update({'figure.autolayout': True})
    plt.tight_layout()
    if(admin):
        plt.savefig('./recognition/static/recognition/img/attendance_graphs/hours_vs_date/1.png')
        plt.close()
    else:
        plt.savefig('./recognition/static/recognition/img/attendance_graphs/employee_login/1.png')
        plt.close()
    return qs
# used
def hours_vs_employee_given_date(present_qs, time_qs):
    register_matplotlib_converters()
    df_hours = []
    df_break_hours = []
    df_username = []
    qs = present_qs

    for obj in qs:
        user = obj.user
        times_in = time_qs.filter(user=user).filter(out=False)
        times_out = time_qs.filter(user=user).filter(out=True)
        times_all = time_qs.filter(user=user)
        obj.time_in = None
        obj.time_out = None
        obj.hours = 0
        if (len(times_in) > 0):
            obj.time_in = times_in.first().time
        if (len(times_out) > 0):
            obj.time_out = times_out.last().time
        if(obj.time_in is not None and obj.time_out is not None):
            ti = obj.time_in
            to = obj.time_out
            hours = ((to - ti).total_seconds()) / 3600
            obj.hours = hours
        else:
            obj.hours = 0
        (check, break_hourss) = check_validity_times(times_all)
        if check:
            obj.break_hours = break_hourss
        else:
            obj.break_hours = 0
        df_hours.append(obj.hours)
        df_username.append(user.username)
        df_break_hours.append(obj.break_hours)
        obj.hours = convert_hours_to_hours_mins(obj.hours)
        obj.break_hours = convert_hours_to_hours_mins(obj.break_hours)

    df = read_frame(qs)
    df['hours'] = df_hours
    df['username'] = df_username
    df["break_hours"] = df_break_hours

    sns.barplot(data=df, x='username', y='hours')
    plt.xticks(rotation='vertical')
    rcParams.update({'figure.autolayout': True})
    plt.tight_layout()
    plt.savefig('./recognition/static/recognition/img/attendance_graphs/hours_vs_employee/1.png')
    plt.close()
    return qs
def total_number_employees():
    qs = User.objects.all()
    return (len(qs) - 1)  # -1 to account for admin


def employees_present_today():
    today = datetime.date.today()
    qs = Present.objects.filter(date=today).filter(present=True)
    return len(qs)
# used
def this_week_emp_count_vs_date():
    today = datetime.date.today()
    some_day_last_week = today - datetime.timedelta(days=7)
    monday_of_last_week = some_day_last_week - datetime.timedelta(days=(some_day_last_week.isocalendar()[2] - 1))
    monday_of_this_week = monday_of_last_week + datetime.timedelta(days=7)
    qs = Present.objects.filter(date__gte=monday_of_this_week).filter(date__lte=today)
    str_dates = []
    emp_count = []
    str_dates_all = []
    emp_cnt_all = []
    cnt = 0

    for obj in qs:
        date = obj.date
        str_dates.append(str(date))
        qs = Present.objects.filter(date=date).filter(present=True)
        emp_count.append(len(qs))

    while(cnt < 5):
        date = str(monday_of_this_week + datetime.timedelta(days=cnt))
        cnt += 1
        str_dates_all.append(date)
        if(str_dates.count(date)) > 0:
            idx = str_dates.index(date)
            emp_cnt_all.append(emp_count[idx])
        else:
            emp_cnt_all.append(0)

    df = pd.DataFrame()
    df["date"] = str_dates_all
    df["Number of employees"] = emp_cnt_all

    sns.lineplot(data=df, x='date', y='Number of employees')
    plt.savefig('./recognition/static/recognition/img/attendance_graphs/this_week/1.png')
    plt.close()
# used
def last_week_emp_count_vs_date():
    today = datetime.date.today()
    some_day_last_week = today - datetime.timedelta(days=7)
    monday_of_last_week = some_day_last_week - datetime.timedelta(days=(some_day_last_week.isocalendar()[2] - 1))
    monday_of_this_week = monday_of_last_week + datetime.timedelta(days=7)
    qs = Present.objects.filter(date__gte=monday_of_last_week).filter(date__lt=monday_of_this_week)
    str_dates = []
    emp_count = []
    str_dates_all = []
    emp_cnt_all = []
    cnt = 0

    for obj in qs:
        date = obj.date
        str_dates.append(str(date))
        qs = Present.objects.filter(date=date).filter(present=True)
        emp_count.append(len(qs))

    while(cnt < 5):
        date = str(monday_of_last_week + datetime.timedelta(days=cnt))
        cnt += 1
        str_dates_all.append(date)
        if(str_dates.count(date)) > 0:
            idx = str_dates.index(date)
            emp_cnt_all.append(emp_count[idx])
        else:
            emp_cnt_all.append(0)

    df = pd.DataFrame()
    df["date"] = str_dates_all
    df["emp_count"] = emp_cnt_all

    sns.lineplot(data=df, x='date', y='emp_count')
    plt.savefig('./recognition/static/recognition/img/attendance_graphs/last_week/1.png')
    plt.close()
# Create your views here.
def home(request):
    return render(request, 'recognition/home.html')


@login_required
def dashboard(request):
    if(request.user.username == 'admin'):
        print("admin")
        return render(request, 'recognition/admin_dashboard.html')
    else:
        print("not admin")
        return render(request, 'recognition/employee_dashboard.html')
@login_required
def add_photos(request):
    if request.user.username != 'admin':
        return redirect('not-authorised')
    if request.method == 'POST':
        form = usernameForm(request.POST)
        data = request.POST.copy()
        username = data.get('username')
        if username_present(username):
            create_dataset(username)
            messages.success(request, f'Dataset Created')
            return redirect('add-photos')
        else:
            messages.warning(request, f'No such username found. Please register employee first.')
            return redirect('dashboard')
    else:
        form = usernameForm()
        return render(request, 'recognition/add_photos.html', {'form': form})
def mark_your_attendance(request):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat')  # Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER
    svc_save_path = "face_recognition_data/svc.sav"

    with open(svc_save_path, 'rb') as f:
        svc = pickle.load(f)
    fa = FaceAligner(predictor, desiredFaceWidth=96)
    encoder = LabelEncoder()
    encoder.classes_ = np.load('face_recognition_data/classes.npy')

    faces_encodings = np.zeros((1, 128))
    no_of_faces = len(svc.predict_proba(faces_encodings)[0])
    count = dict()
    present = dict()
    log_time = dict()
    start = dict()
    for i in range(no_of_faces):
        count[encoder.inverse_transform([i])[0]] = 0
        present[encoder.inverse_transform([i])[0]] = False

    vs = VideoStream(src=0).start()

    sampleNum = 0
    while(True):
        frame = vs.read()
        frame = imutils.resize(frame, width=800)
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = detector(gray_frame, 0)

        for face in faces:
            print("INFO : inside for loop")
            (x, y, w, h) = face_utils.rect_to_bb(face)
            face_aligned = fa.align(frame, gray_frame, face)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)

            (pred, prob) = predict(face_aligned, svc)

            if(pred != [-1]):
                person_name = encoder.inverse_transform(np.ravel([pred]))[0]
                pred = person_name
                if count[pred] == 0:
                    start[pred] = time.time()
                    count[pred] = count.get(pred, 0) + 1

                if count[pred] == 4 and (time.time() - start[pred]) > 1.2:
                    count[pred] = 0
                else:
                    # if count[pred] == 4 and (time.time()-start) <= 1.5:
                    present[pred] = True
                    log_time[pred] = datetime.datetime.now()
                    count[pred] = count.get(pred, 0) + 1
                    print(pred, present[pred], count[pred])
                cv2.putText(frame, str(person_name) + str(prob), (x + 6, y + h - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            else:
                person_name = "unknown"
                cv2.putText(frame, str(person_name), (x + 6, y + h - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
                # cv2.putText()
            # Give it a little pause before continuing to the next loop
            # cv2.waitKey(50)

        # Show the annotated frame in its own window
        cv2.imshow("Mark Attendance - In - Press q to exit", frame)
        # A wait command is needed, otherwise OpenCV won't refresh the window
        # cv2.waitKey(1)
        # To get out of the loop
        key = cv2.waitKey(50) & 0xFF
        if(key == ord("q")):
            break

    # Stopping the videostream
    vs.stop()
    # Destroying all the windows
    cv2.destroyAllWindows()
    update_attendance_in_db_in(present)
    return redirect('home')
def mark_your_attendance_out(request):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat')  # Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER
    svc_save_path = "face_recognition_data/svc.sav"

    with open(svc_save_path, 'rb') as f:
        svc = pickle.load(f)
    fa = FaceAligner(predictor, desiredFaceWidth=96)
    encoder = LabelEncoder()
    encoder.classes_ = np.load('face_recognition_data/classes.npy')

    faces_encodings = np.zeros((1, 128))
    no_of_faces = len(svc.predict_proba(faces_encodings)[0])
    count = dict()
    present = dict()
    log_time = dict()
    start = dict()
    for i in range(no_of_faces):
        count[encoder.inverse_transform([i])[0]] = 0
        present[encoder.inverse_transform([i])[0]] = False

    vs = VideoStream(src=0).start()

    sampleNum = 0
    while(True):
        frame = vs.read()
        frame = imutils.resize(frame, width=800)
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = detector(gray_frame, 0)

        for face in faces:
            print("INFO : inside for loop")
            (x, y, w, h) = face_utils.rect_to_bb(face)
            face_aligned = fa.align(frame, gray_frame, face)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)

            (pred, prob) = predict(face_aligned, svc)

            if(pred != [-1]):
                person_name = encoder.inverse_transform(np.ravel([pred]))[0]
                pred = person_name
                if count[pred] == 0:
                    start[pred] = time.time()
                    count[pred] = count.get(pred, 0) + 1

                if count[pred] == 4 and (time.time() - start[pred]) > 1.5:
                    count[pred] = 0
                else:
                    # if count[pred] == 4 and (time.time()-start) <= 1.5:
                    present[pred] = True
                    log_time[pred] = datetime.datetime.now()
                    count[pred] = count.get(pred, 0) + 1
                    print(pred, present[pred], count[pred])
                cv2.putText(frame, str(person_name) + str(prob), (x + 6, y + h - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            else:
                person_name = "unknown"
                cv2.putText(frame, str(person_name), (x + 6, y + h - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
                # cv2.putText()
            # Give it a little pause before continuing to the next loop
            # cv2.waitKey(50)

        # Show the annotated frame in its own window
        cv2.imshow("Mark Attendance - Out - Press q to exit", frame)
        # A wait command is needed, otherwise OpenCV won't refresh the window
        # cv2.waitKey(1)
        # To get out of the loop
        key = cv2.waitKey(50) & 0xFF
        if(key == ord("q")):
            break

    # Stopping the videostream
    vs.stop()
    # Destroying all the windows
    cv2.destroyAllWindows()
    update_attendance_in_db_out(present)
    return redirect('home')
@login_required
def train(request):
    if request.user.username != 'admin':
        return redirect('not-authorised')

    training_dir = 'face_recognition_data/training_dataset'

    count = 0
    for person_name in os.listdir(training_dir):
        curr_directory = os.path.join(training_dir, person_name)
        if not os.path.isdir(curr_directory):
            continue
        for imagefile in image_files_in_folder(curr_directory):
            count += 1

    X = []
    y = []
    i = 0
    for person_name in os.listdir(training_dir):
        print(str(person_name))
        curr_directory = os.path.join(training_dir, person_name)
        if not os.path.isdir(curr_directory):
            continue
        for imagefile in image_files_in_folder(curr_directory):
            print(str(imagefile))
            image = cv2.imread(imagefile)
            try:
                X.append((face_recognition.face_encodings(image)[0]).tolist())
                y.append(person_name)
                i += 1
            except:
                print("removed")
                os.remove(imagefile)

    targets = np.array(y)
    encoder = LabelEncoder()
    encoder.fit(y)
    y = encoder.transform(y)
    X1 = np.array(X)
    print("shape: " + str(X1.shape))
    np.save('face_recognition_data/classes.npy', encoder.classes_)

    svc = SVC(kernel='linear', probability=True)
    svc.fit(X1, y)
    svc_save_path = "face_recognition_data/svc.sav"
    with open(svc_save_path, 'wb') as f:
        pickle.dump(svc, f)

    vizualize_Data(X1, targets)

    messages.success(request, f'Training Complete.')
    return render(request, "recognition/train.html")
@login_required
def not_authorised(request):
    return render(request, 'recognition/not_authorised.html')


@login_required
def view_attendance_home(request):
    total_num_of_emp = total_number_employees()
    emp_present_today = employees_present_today()
    this_week_emp_count_vs_date()
    last_week_emp_count_vs_date()
    return render(request, "recognition/view_attendance_home.html", {'total_num_of_emp': total_num_of_emp, 'emp_present_today': emp_present_today})
@login_required
def view_attendance_date(request):
    if request.user.username != 'admin':
        return redirect('not-authorised')
    qs = None
    time_qs = None
    present_qs = None

    if request.method == 'POST':
        form = DateForm(request.POST)
        if form.is_valid():
            date = form.cleaned_data.get('date')
            print("date:" + str(date))
            time_qs = Time.objects.filter(date=date)
            present_qs = Present.objects.filter(date=date)
            if(len(time_qs) > 0 or len(present_qs) > 0):
                qs = hours_vs_employee_given_date(present_qs, time_qs)
                return render(request, 'recognition/view_attendance_date.html', {'form': form, 'qs': qs})
            else:
                messages.warning(request, f'No records for selected date.')
                return redirect('view-attendance-date')
    else:
        form = DateForm()
    return render(request, 'recognition/view_attendance_date.html', {'form': form, 'qs': qs})
@login_required
def view_attendance_employee(request):
    if request.user.username != 'admin':
        return redirect('not-authorised')
    time_qs = None
    present_qs = None
    qs = None

    if request.method == 'POST':
        form = UsernameAndDateForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            if username_present(username):
                u = User.objects.get(username=username)
                time_qs = Time.objects.filter(user=u)
                present_qs = Present.objects.filter(user=u)
                date_from = form.cleaned_data.get('date_from')
                date_to = form.cleaned_data.get('date_to')
                if date_to < date_from:
                    messages.warning(request, f'Invalid date selection.')
                    return redirect('view-attendance-employee')
                else:
                    time_qs = time_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date')
                    present_qs = present_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date')
                    if (len(time_qs) > 0 or len(present_qs) > 0):
                        qs = hours_vs_date_given_employee(present_qs, time_qs, admin=True)
                        return render(request, 'recognition/view_attendance_employee.html', {'form': form, 'qs': qs})
                    else:
                        # print("inside qs is None")
                        messages.warning(request, f'No records for selected duration.')
                        return redirect('view-attendance-employee')
            else:
                print("invalid username")
                messages.warning(request, f'No such username found.')
                return redirect('view-attendance-employee')
    else:
        form = UsernameAndDateForm()
    return render(request, 'recognition/view_attendance_employee.html', {'form': form, 'qs': qs})
@login_required
def view_my_attendance_employee_login(request):
    if request.user.username == 'admin':
        return redirect('not-authorised')
    qs = None
    time_qs = None
    present_qs = None

    if request.method == 'POST':
        form = DateForm_2(request.POST)
        if form.is_valid():
            u = request.user
            time_qs = Time.objects.filter(user=u)
            present_qs = Present.objects.filter(user=u)
            date_from = form.cleaned_data.get('date_from')
            date_to = form.cleaned_data.get('date_to')
            if date_to < date_from:
                messages.warning(request, f'Invalid date selection.')
                return redirect('view-my-attendance-employee-login')
            else:
                time_qs = time_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date')
                present_qs = present_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date')
                if (len(time_qs) > 0 or len(present_qs) > 0):
                    qs = hours_vs_date_given_employee(present_qs, time_qs, admin=False)
                    return render(request, 'recognition/view_my_attendance_employee_login.html', {'form': form, 'qs': qs})
                else:
                    messages.warning(request, f'No records for selected duration.')
                    return redirect('view-my-attendance-employee-login')
    else:
        form = DateForm_2()
    return render(request, 'recognition/view_my_attendance_employee_login.html', {'form': form, 'qs': qs})
| [
"cv2.rectangle",
"sklearn.preprocessing.LabelEncoder",
"users.models.Time",
"django.contrib.messages.warning",
"users.models.Time.objects.filter",
"cv2.imshow",
"django.contrib.auth.models.User.objects.filter",
"numpy.array",
"face_recognition.face_encodings",
"cv2.destroyAllWindows",
"face_recognition.face_recognition_cli.image_files_in_folder",
"users.models.Present.objects.get",
"pandas.plotting.register_matplotlib_converters",
"datetime.timedelta",
"django.contrib.auth.models.User.objects.all",
"django.contrib.auth.models.User.objects.get",
"numpy.save",
"imutils.face_utils.FaceAligner",
"django.shortcuts.render",
"os.remove",
"os.listdir",
"imutils.video.VideoStream",
"imutils.face_utils.rect_to_bb",
"django_pandas.io.read_frame",
"dlib.shape_predictor",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.close",
"dlib.get_frontal_face_detector",
"users.models.Present",
"django.shortcuts.redirect",
"os.path.isdir",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"cv2.waitKey",
"face_recognition.face_locations",
"matplotlib.pyplot.savefig",
"matplotlib.rcParams.update",
"matplotlib.pyplot.xticks",
"matplotlib.use",
"pickle.load",
"users.models.Present.objects.filter",
"seaborn.lineplot",
"cv2.cvtColor",
"datetime.date.today",
"cv2.imread",
"time.time",
"matplotlib.pyplot.legend",
"sklearn.svm.SVC",
"math.ceil",
"pickle.dump",
"os.path.join",
"datetime.datetime.now",
"numpy.zeros",
"imutils.resize",
"matplotlib.pyplot.tight_layout",
"django.contrib.messages.success",
"numpy.ravel",
"seaborn.barplot",
"numpy.load",
"numpy.amax"
]
import re
import string
DATA = '05.txt'
def react(polymer):
pairs = '|'.join([a + b + '|' + b + a for a, b in zip(string.ascii_lowercase, string.ascii_uppercase)])
length = len(polymer)
    while True:
        polymer = re.sub(pairs, '', polymer)
        if len(polymer) == length:
            return length
        else:
            length = len(polymer)
def code1():
with open(DATA) as f:
polymer = f.readline().strip()
print('1>', react(polymer))
def code2():
with open(DATA) as f:
polymer = f.readline().strip()
minlength = len(polymer)
for c in string.ascii_lowercase:
polymer2 = re.sub(c, '', polymer, flags=re.I)
length = react(polymer2)
if length < minlength:
minlength = length
print('2>', minlength)
code1()
code2()
| [
"re.sub"
]
| [((240, 266), 're.sub', 're.sub', (['pairs', '""""""', 'polymer'], {}), "(pairs, '', polymer)\n", (246, 266), False, 'import re\n'), ((672, 706), 're.sub', 're.sub', (['c', '""""""', 'polymer'], {'flags': 're.I'}), "(c, '', polymer, flags=re.I)\n", (678, 706), False, 'import re\n')] |
import json
import cherrypy
import engine
class WebServer(object):
@cherrypy.expose
def index(self):
return open('public/index.html', encoding='utf-8')
@cherrypy.expose
class GetOptionsService(object):
@cherrypy.tools.accept(media='text/plain')
def GET(self):
return json.dumps({
'providers': engine.get_providers(),
'algorithms': engine.get_algorithms(),
'default_datasets': engine.get_all_default_datasets()
})
@cherrypy.expose
class SetOptionsService(object):
@cherrypy.tools.accept(media='text/plain')
def POST(self, options):
""" Use the options selected by the user to execute all algorithms
:param options: {
is_default_dataset: bool,
dataset: str,
providers: []
algorithms: []
target: str
}
if is_default_dataset is true, dataset will contain the name of the default_dataset"""
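        # Illustrative example (not from the original source) of the JSON payload this
        # endpoint expects; the dataset/provider/algorithm names below are hypothetical:
        # {
        #     "is_default_dataset": true,
        #     "dataset": "example_dataset",
        #     "providers": ["example_provider"],
        #     "algorithms": ["example_algorithm"],
        #     "target": "example_target_column"
        # }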
options_dic = json.loads(options)
try:
result = engine.execute(options_dic['is_default_dataset'], options_dic['dataset'], options_dic['providers'],
options_dic['algorithms'],
options_dic['target'])
except Exception as exception:
message = f"{str(exception)}"
raise cherrypy.HTTPError(500, message=message)
return result
@cherrypy.expose
@cherrypy.tools.json_out()
class GetDefaultDatasetHeadersService(object):
@cherrypy.tools.accept(media='text/plain')
def GET(self, default_dataset_name):
return {'headers': engine.get_default_dataset_headers(default_dataset_name)}
| [
"engine.get_default_dataset_headers",
"cherrypy.tools.json_out",
"cherrypy.tools.accept",
"json.loads",
"engine.get_all_default_datasets",
"engine.get_providers",
"engine.execute",
"engine.get_algorithms",
"cherrypy.HTTPError"
]
| [((1500, 1525), 'cherrypy.tools.json_out', 'cherrypy.tools.json_out', ([], {}), '()\n', (1523, 1525), False, 'import cherrypy\n'), ((230, 271), 'cherrypy.tools.accept', 'cherrypy.tools.accept', ([], {'media': '"""text/plain"""'}), "(media='text/plain')\n", (251, 271), False, 'import cherrypy\n'), ((553, 594), 'cherrypy.tools.accept', 'cherrypy.tools.accept', ([], {'media': '"""text/plain"""'}), "(media='text/plain')\n", (574, 594), False, 'import cherrypy\n'), ((1578, 1619), 'cherrypy.tools.accept', 'cherrypy.tools.accept', ([], {'media': '"""text/plain"""'}), "(media='text/plain')\n", (1599, 1619), False, 'import cherrypy\n'), ((1041, 1060), 'json.loads', 'json.loads', (['options'], {}), '(options)\n', (1051, 1060), False, 'import json\n'), ((1096, 1249), 'engine.execute', 'engine.execute', (["options_dic['is_default_dataset']", "options_dic['dataset']", "options_dic['providers']", "options_dic['algorithms']", "options_dic['target']"], {}), "(options_dic['is_default_dataset'], options_dic['dataset'],\n options_dic['providers'], options_dic['algorithms'], options_dic['target'])\n", (1110, 1249), False, 'import engine\n'), ((1688, 1744), 'engine.get_default_dataset_headers', 'engine.get_default_dataset_headers', (['default_dataset_name'], {}), '(default_dataset_name)\n', (1722, 1744), False, 'import engine\n'), ((344, 366), 'engine.get_providers', 'engine.get_providers', ([], {}), '()\n', (364, 366), False, 'import engine\n'), ((394, 417), 'engine.get_algorithms', 'engine.get_algorithms', ([], {}), '()\n', (415, 417), False, 'import engine\n'), ((451, 484), 'engine.get_all_default_datasets', 'engine.get_all_default_datasets', ([], {}), '()\n', (482, 484), False, 'import engine\n'), ((1417, 1457), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(500)'], {'message': 'message'}), '(500, message=message)\n', (1435, 1457), False, 'import cherrypy\n')] |
from typing import Union
from tuprolog import logger
# noinspection PyUnresolvedReferences
import jpype.imports
# noinspection PyUnresolvedReferences
import it.unibo.tuprolog.solve.exception.error as errors
from tuprolog.core import Term, Atom
from tuprolog.solve import ExecutionContext, Signature
ExistenceError = errors.ExistenceError
ObjectType = ExistenceError.ObjectType
OBJECT_PROCEDURE = ObjectType.PROCEDURE
OBJECT_SOURCE_SINK = ObjectType.SOURCE_SINK
OBJECT_RESOURCE = ObjectType.RESOURCE
OBJECT_STREAM = ObjectType.STREAM
OBJECT_OOP_ALIAS = ObjectType.OOP_ALIAS
OBJECT_OOP_METHOD = ObjectType.OOP_METHOD
OBJECT_OOP_CONSTRUCTOR = ObjectType.OOP_CONSTRUCTOR
OBJECT_OOP_PROPERTY = ObjectType.OOP_PROPERTY
def existence_error(
context: ExecutionContext,
type: ObjectType,
culprit: Term,
message: str
) -> ExistenceError:
return ExistenceError.of(context, type, culprit, message)
def existence_error_for_source_sink(
context: ExecutionContext,
alias: Union[Atom, str]
) -> ExistenceError:
return ExistenceError.forSourceSink(context, alias)
def existence_error_for_procedure(
context: ExecutionContext,
procedure: Signature
) -> ExistenceError:
return ExistenceError.forProcedure(context, procedure)
def existence_error_for_stream(
context: ExecutionContext,
stream: Term
) -> ExistenceError:
return ExistenceError.forStream(context, stream)
def existence_error_for_resource(
context: ExecutionContext,
name: str
) -> ExistenceError:
return ExistenceError.forResource(context, name)
def object_type(name: Union[str, Term]) -> ObjectType:
if isinstance(name, str):
return ObjectType.of(name)
else:
return ObjectType.fromTerm(name)
logger.debug("Loaded JVM classes from it.unibo.tuprolog.solve.exception.error.ExistenceError.*")
| [
"tuprolog.logger.debug"
]
| [((1800, 1906), 'tuprolog.logger.debug', 'logger.debug', (['"""Loaded JVM classes from it.unibo.tuprolog.solve.exception.error.ExistenceError.*"""'], {}), "(\n 'Loaded JVM classes from it.unibo.tuprolog.solve.exception.error.ExistenceError.*'\n )\n", (1812, 1906), False, 'from tuprolog import logger\n')] |
import pybullet as p
import pybullet_data
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from math import sqrt
import random
import time
import math
import cv2
import torch
import os
def random_crop(imgs, out):
"""
args:
imgs: shape (B,C,H,W)
out: output size (e.g. 84)
"""
n, c, h, w = imgs.shape
crop_max = h - out + 1
w1 = np.random.randint(0, crop_max, n)
h1 = np.random.randint(0, crop_max, n)
cropped = np.empty((n, c, out, out), dtype=imgs.dtype)
for i, (img, w11, h11) in enumerate(zip(imgs, w1, h1)):
cropped[i] = img[:, h11:h11 + out, w11:w11 + out]
return cropped
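# A minimal usage sketch (not part of the original script); the batch below is hypothetical:
#   obs = np.zeros((8, 4, 96, 96), dtype=np.float32)  # stacked 96x96 observations
#   cropped = random_crop(obs, 84)                     # -> shape (8, 4, 84, 84)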
class KukaReachVisualEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
kMaxEpisodeSteps = 700
kImageSize = {'width': 96, 'height': 96}
kFinalImageSize = {'width': 84, 'height': 84}
def __init__(self, is_render=False, is_good_view=False):
self.is_render = is_render
self.is_good_view = is_good_view
if self.is_render:
p.connect(p.GUI)
else:
p.connect(p.DIRECT)
self.x_low_obs = 0.2
self.x_high_obs = 0.7
self.y_low_obs = -0.3
self.y_high_obs = 0.3
self.z_low_obs = 0
self.z_high_obs = 0.55
self.x_low_action = -0.4
self.x_high_action = 0.4
self.y_low_action = -0.4
self.y_high_action = 0.4
self.z_low_action = -0.6
self.z_high_action = 0.3
self.step_counter = 0
self.urdf_root_path = pybullet_data.getDataPath()
# lower limits for null space
self.lower_limits = [-.967, -2, -2.96, 0.19, -2.96, -2.09, -3.05]
# upper limits for null space
self.upper_limits = [.967, 2, 2.96, 2.29, 2.96, 2.09, 3.05]
# joint ranges for null space
self.joint_ranges = [5.8, 4, 5.8, 4, 5.8, 4, 6]
# restposes for null space
self.rest_poses = [0, 0, 0, 0.5 * math.pi, 0, -math.pi * 0.5 * 0.66, 0]
# joint damping coefficents
self.joint_damping = [
0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001
]
self.init_joint_positions = [
0.006418, 0.413184, -0.011401, -1.589317, 0.005379, 1.137684,
-0.006539
]
self.orientation = p.getQuaternionFromEuler(
[0., -math.pi, math.pi / 2.])
self.camera_parameters = {
'width': 960.,
'height': 720,
'fov': 60,
'near': 0.1,
'far': 100.,
'eye_position': [0.59, 0, 0.8],
'target_position': [0.55, 0, 0.05],
'camera_up_vector':
[1, 0, 0], # I really do not know the parameter's effect.
'light_direction': [
0.5, 0, 1
], # the direction is from the light source position to the origin of the world frame.
}
self.view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=[0.55, 0, 0.05],
distance=.7,
yaw=90,
pitch=-70,
roll=0,
upAxisIndex=2)
self.projection_matrix = p.computeProjectionMatrixFOV(
fov=self.camera_parameters['fov'],
aspect=self.camera_parameters['width'] /
self.camera_parameters['height'],
nearVal=self.camera_parameters['near'],
farVal=self.camera_parameters['far'])
p.configureDebugVisualizer(lightPosition=[5, 0, 5])
p.resetDebugVisualizerCamera(cameraDistance=1.5,
cameraYaw=0,
cameraPitch=-40,
cameraTargetPosition=[0.55, -0.35, 0.2])
self.action_space = spaces.Box(low=np.array(
[self.x_low_action, self.y_low_action, self.z_low_action]),
high=np.array([
self.x_high_action,
self.y_high_action,
self.z_high_action
]),
dtype=np.float32)
self.observation_space = spaces.Box(low=0, high=1,
shape=(1, self.kFinalImageSize['width'], self.kFinalImageSize['height']))
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self.step_counter = 0
p.resetSimulation()
# p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
self.terminated = False
p.setGravity(0, 0, -10)
        # These are the surrounding white lines, used to see whether the end effector goes beyond the obs boundaries
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, 0],
lineToXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_high_obs, 0],
lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_low_obs, 0],
lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_high_obs, 0],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.loadURDF(os.path.join(self.urdf_root_path, "plane.urdf"),
basePosition=[0, 0, -0.65])
self.kuka_id = p.loadURDF(os.path.join(self.urdf_root_path,
"kuka_iiwa/model.urdf"),
useFixedBase=True)
table_uid = p.loadURDF(os.path.join(self.urdf_root_path,
"table/table.urdf"),
basePosition=[0.5, 0, -0.65])
p.changeVisualShape(table_uid, -1, rgbaColor=[1, 1, 1, 1])
self.object_id = p.loadURDF(os.path.join(self.urdf_root_path,
"random_urdfs/000/000.urdf"),
basePosition=[
random.uniform(self.x_low_obs,
self.x_high_obs),
random.uniform(self.y_low_obs,
self.y_high_obs), 0.01
])
self.num_joints = p.getNumJoints(self.kuka_id)
for i in range(self.num_joints):
p.resetJointState(
bodyUniqueId=self.kuka_id,
jointIndex=i,
targetValue=self.init_joint_positions[i],
)
self.robot_pos_obs = p.getLinkState(self.kuka_id,
self.num_joints - 1)[4]
p.stepSimulation()
(_, _, px, _,
_) = p.getCameraImage(width=960,
height=960,
viewMatrix=self.view_matrix,
projectionMatrix=self.projection_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
self.images = px
p.enableJointForceTorqueSensor(bodyUniqueId=self.kuka_id,
jointIndex=self.num_joints - 1,
enableSensor=True)
self.object_pos = p.getBasePositionAndOrientation(self.object_id)[0]
self.images = self.images[:, :, :
3] # the 4th channel is alpha channel, we do not need it.
return self._process_image(self.images)
def _process_image(self, image):
"""Convert the RGB pic to gray pic and add a channel 1
Args:
            image (np.ndarray): RGB image of shape (H, W, 3), or None.
"""
if image is not None:
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = cv2.resize(image, (self.kImageSize['width'], self.kImageSize['height']))[None, :, :] / 255.
return image
else:
return np.zeros((1, self.kImageSize['width'], self.kImageSize['height']))
def step(self, action):
dv = 0.005
dx = action[0] * dv
dy = action[1] * dv
dz = action[2] * dv
self.current_pos = p.getLinkState(self.kuka_id, self.num_joints - 1)[4]
self.new_robot_pos = [
self.current_pos[0] + dx, self.current_pos[1] + dy,
self.current_pos[2] + dz
]
self.robot_joint_positions = p.calculateInverseKinematics(
bodyUniqueId=self.kuka_id,
endEffectorLinkIndex=self.num_joints - 1,
targetPosition=[
self.new_robot_pos[0], self.new_robot_pos[1],
self.new_robot_pos[2]
],
targetOrientation=self.orientation,
jointDamping=self.joint_damping,
)
for i in range(self.num_joints):
p.resetJointState(
bodyUniqueId=self.kuka_id,
jointIndex=i,
targetValue=self.robot_joint_positions[i],
)
p.stepSimulation()
        # If is_good_view was set when the environment was created, the arm moves more slowly so it is easier to observe
if self.is_good_view:
time.sleep(0.05)
self.step_counter += 1
return self._reward()
def _reward(self):
        # Note that we take the element at index 4; see the pybullet manual for this function's return values
self.robot_state = p.getLinkState(self.kuka_id, self.num_joints - 1)[4]
self.object_state = np.array(
p.getBasePositionAndOrientation(self.object_id)[0]).astype(
np.float32)
square_dx = (self.robot_state[0] - self.object_state[0]) ** 2
square_dy = (self.robot_state[1] - self.object_state[1]) ** 2
square_dz = (self.robot_state[2] - self.object_state[2]) ** 2
        # Use the distance between the end effector and the object as the basis for the reward function
self.distance = sqrt(square_dx + square_dy + square_dz)
# print(self.distance)
x = self.robot_state[0]
y = self.robot_state[1]
z = self.robot_state[2]
        # If the end effector moves outside the obs space, the episode is also treated as done and a penalty is given
terminated = bool(x < self.x_low_obs or x > self.x_high_obs
or y < self.y_low_obs or y > self.y_high_obs
or z < self.z_low_obs or z > self.z_high_obs)
if terminated:
reward = -0.1
self.terminated = True
        # If the arm keeps idling and cannot touch the object within the maximum number of steps, a penalty is also given
elif self.step_counter > self.kMaxEpisodeSteps:
reward = -0.1
self.terminated = True
elif self.distance < 0.1:
reward = 1
self.terminated = True
else:
reward = 0
self.terminated = False
        info = {'distance': self.distance}
(_, _, px, _,
_) = p.getCameraImage(width=960,
height=960,
viewMatrix=self.view_matrix,
projectionMatrix=self.projection_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
self.images = px
self.processed_image = self._process_image(self.images)
# self.observation=self.robot_state
self.observation = self.object_state
return self.processed_image, reward, self.terminated, info
def close(self):
p.disconnect()
def _get_force_sensor_value(self):
force_sensor_value = p.getJointState(bodyUniqueId=self.kuka_id,
jointIndex=self.num_joints -
1)[2][2]
        # The first [2] selects jointReactionForces and the second [2] selects Fz.
        # pybullet methods return tuples, so the result cannot be indexed by name
        # like a dict; returning a dict instead of a tuple would be an improvement.
return force_sensor_value
class CustomSkipFrame(gym.Wrapper):
""" Make a 4 frame skip, so the observation space will change to (4,84,84) from (1,84,84)
Args:
        env (gym.Env): the environment to wrap.
        skip (int): number of frames to stack (default 4).
"""
def __init__(self, env, skip=4):
super(CustomSkipFrame, self).__init__(env)
self.observation_space = spaces.Box(low=0,
high=1,
shape=(skip, self.kFinalImageSize['width'], self.kFinalImageSize['height']))
self.skip = skip
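    # Illustrative note (not in the original source): with skip=4 and 84x84 final images,
    # step() returns an observation of shape (1, 4, 84, 84) after random_crop.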
    def step(self, action):
        total_reward = 0
        states = []
        state, reward, done, info = self.env.step(action)
        # accumulate the reward over the initial step and the skipped frames
        total_reward += reward
        for i in range(self.skip):
            if not done:
                state, reward, done, info = self.env.step(action)
                total_reward += reward
                states.append(state)
            else:
                states.append(state)
        states = np.concatenate(states, 0)[None, :, :, :]
        return random_crop(states.astype(np.float32), self.kFinalImageSize['width']), total_reward, done, info
def reset(self):
state = self.env.reset()
states = np.concatenate([state for _ in range(self.skip)],
0)[None, :, :, :]
return random_crop(states.astype(np.float32), self.kFinalImageSize['width'])
if __name__ == '__main__':
    # This part is a baseline: let the arm choose random actions and see what score it can get
import matplotlib.pyplot as plt
env = KukaReachVisualEnv(is_render=False)
env = CustomSkipFrame(env)
print(env.observation_space.shape)
print(env.action_space.shape)
    # Box action spaces have no `.n` attribute (only Discrete spaces do), so print the space itself
    print(env.action_space)
# for _ in range(20):
# action=env.action_space.sample()
# print(action)
# env.step(action)
#
# state = env.reset()
# print(state.shape)
# img = state[0][0]
# plt.imshow(img, cmap='gray')
# plt.show()
| [
"pybullet_data.getDataPath",
"math.sqrt",
"pybullet.computeViewMatrixFromYawPitchRoll",
"pybullet.setGravity",
"time.sleep",
"numpy.array",
"pybullet.disconnect",
"gym.utils.seeding.np_random",
"pybullet.connect",
"pybullet.addUserDebugLine",
"pybullet.getNumJoints",
"pybullet.getCameraImage",
"pybullet.getQuaternionFromEuler",
"numpy.empty",
"numpy.concatenate",
"pybullet.getJointState",
"pybullet.resetDebugVisualizerCamera",
"pybullet.resetSimulation",
"random.uniform",
"pybullet.configureDebugVisualizer",
"cv2.cvtColor",
"pybullet.enableJointForceTorqueSensor",
"cv2.resize",
"pybullet.computeProjectionMatrixFOV",
"pybullet.getLinkState",
"pybullet.resetJointState",
"pybullet.calculateInverseKinematics",
"pybullet.getBasePositionAndOrientation",
"os.path.join",
"pybullet.changeVisualShape",
"gym.spaces.Box",
"numpy.random.randint",
"numpy.zeros",
"pybullet.stepSimulation"
]
| [((431, 464), 'numpy.random.randint', 'np.random.randint', (['(0)', 'crop_max', 'n'], {}), '(0, crop_max, n)\n', (448, 464), True, 'import numpy as np\n'), ((475, 508), 'numpy.random.randint', 'np.random.randint', (['(0)', 'crop_max', 'n'], {}), '(0, crop_max, n)\n', (492, 508), True, 'import numpy as np\n'), ((524, 568), 'numpy.empty', 'np.empty', (['(n, c, out, out)'], {'dtype': 'imgs.dtype'}), '((n, c, out, out), dtype=imgs.dtype)\n', (532, 568), True, 'import numpy as np\n'), ((1698, 1725), 'pybullet_data.getDataPath', 'pybullet_data.getDataPath', ([], {}), '()\n', (1723, 1725), False, 'import pybullet_data\n'), ((2496, 2552), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0.0, -math.pi, math.pi / 2.0]'], {}), '([0.0, -math.pi, math.pi / 2.0])\n', (2520, 2552), True, 'import pybullet as p\n'), ((3141, 3274), 'pybullet.computeViewMatrixFromYawPitchRoll', 'p.computeViewMatrixFromYawPitchRoll', ([], {'cameraTargetPosition': '[0.55, 0, 0.05]', 'distance': '(0.7)', 'yaw': '(90)', 'pitch': '(-70)', 'roll': '(0)', 'upAxisIndex': '(2)'}), '(cameraTargetPosition=[0.55, 0, 0.05],\n distance=0.7, yaw=90, pitch=-70, roll=0, upAxisIndex=2)\n', (3176, 3274), True, 'import pybullet as p\n'), ((3385, 3611), 'pybullet.computeProjectionMatrixFOV', 'p.computeProjectionMatrixFOV', ([], {'fov': "self.camera_parameters['fov']", 'aspect': "(self.camera_parameters['width'] / self.camera_parameters['height'])", 'nearVal': "self.camera_parameters['near']", 'farVal': "self.camera_parameters['far']"}), "(fov=self.camera_parameters['fov'], aspect=self\n .camera_parameters['width'] / self.camera_parameters['height'], nearVal\n =self.camera_parameters['near'], farVal=self.camera_parameters['far'])\n", (3413, 3611), True, 'import pybullet as p\n'), ((3688, 3739), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', ([], {'lightPosition': '[5, 0, 5]'}), '(lightPosition=[5, 0, 5])\n', (3714, 3739), True, 'import pybullet as p\n'), ((3749, 3873), 'pybullet.resetDebugVisualizerCamera', 'p.resetDebugVisualizerCamera', ([], {'cameraDistance': '(1.5)', 'cameraYaw': '(0)', 'cameraPitch': '(-40)', 'cameraTargetPosition': '[0.55, -0.35, 0.2]'}), '(cameraDistance=1.5, cameraYaw=0, cameraPitch=-\n 40, cameraTargetPosition=[0.55, -0.35, 0.2])\n', (3777, 3873), True, 'import pybullet as p\n'), ((4335, 4439), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': "(1, self.kFinalImageSize['width'], self.kFinalImageSize['height'])"}), "(low=0, high=1, shape=(1, self.kFinalImageSize['width'], self.\n kFinalImageSize['height']))\n", (4345, 4439), False, 'from gym import spaces\n'), ((4593, 4616), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (4610, 4616), False, 'from gym.utils import seeding\n'), ((4706, 4725), 'pybullet.resetSimulation', 'p.resetSimulation', ([], {}), '()\n', (4723, 4725), True, 'import pybullet as p\n'), ((4833, 4856), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-10)'], {}), '(0, 0, -10)\n', (4845, 4856), True, 'import pybullet as p\n'), ((4905, 5037), 'pybullet.addUserDebugLine', 'p.addUserDebugLine', ([], {'lineFromXYZ': '[self.x_low_obs, self.y_low_obs, 0]', 'lineToXYZ': '[self.x_low_obs, self.y_low_obs, self.z_high_obs]'}), '(lineFromXYZ=[self.x_low_obs, self.y_low_obs, 0],\n lineToXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs])\n', (4923, 5037), True, 'import pybullet as p\n'), ((5070, 5204), 'pybullet.addUserDebugLine', 'p.addUserDebugLine', ([], {'lineFromXYZ': '[self.x_low_obs, self.y_high_obs, 
0]', 'lineToXYZ': '[self.x_low_obs, self.y_high_obs, self.z_high_obs]'}), '(lineFromXYZ=[self.x_low_obs, self.y_high_obs, 0],\n lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])\n', (5088, 5204), True, 'import pybullet as p\n'), ((5237, 5371), 'pybullet.addUserDebugLine', 'p.addUserDebugLine', ([], {'lineFromXYZ': '[self.x_high_obs, self.y_low_obs, 0]', 'lineToXYZ': '[self.x_high_obs, self.y_low_obs, self.z_high_obs]'}), '(lineFromXYZ=[self.x_high_obs, self.y_low_obs, 0],\n lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])\n', (5255, 5371), True, 'import pybullet as p\n'), ((5404, 5540), 'pybullet.addUserDebugLine', 'p.addUserDebugLine', ([], {'lineFromXYZ': '[self.x_high_obs, self.y_high_obs, 0]', 'lineToXYZ': '[self.x_high_obs, self.y_high_obs, self.z_high_obs]'}), '(lineFromXYZ=[self.x_high_obs, self.y_high_obs, 0],\n lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])\n', (5422, 5540), True, 'import pybullet as p\n'), ((5575, 5723), 'pybullet.addUserDebugLine', 'p.addUserDebugLine', ([], {'lineFromXYZ': '[self.x_low_obs, self.y_low_obs, self.z_high_obs]', 'lineToXYZ': '[self.x_high_obs, self.y_low_obs, self.z_high_obs]'}), '(lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.\n z_high_obs], lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])\n', (5593, 5723), True, 'import pybullet as p\n'), ((5755, 5905), 'pybullet.addUserDebugLine', 'p.addUserDebugLine', ([], {'lineFromXYZ': '[self.x_low_obs, self.y_high_obs, self.z_high_obs]', 'lineToXYZ': '[self.x_high_obs, self.y_high_obs, self.z_high_obs]'}), '(lineFromXYZ=[self.x_low_obs, self.y_high_obs, self.\n z_high_obs], lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])\n', (5773, 5905), True, 'import pybullet as p\n'), ((5937, 6085), 'pybullet.addUserDebugLine', 'p.addUserDebugLine', ([], {'lineFromXYZ': '[self.x_low_obs, self.y_low_obs, self.z_high_obs]', 'lineToXYZ': '[self.x_low_obs, self.y_high_obs, self.z_high_obs]'}), '(lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.\n z_high_obs], lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])\n', (5955, 6085), True, 'import pybullet as p\n'), ((6117, 6267), 'pybullet.addUserDebugLine', 'p.addUserDebugLine', ([], {'lineFromXYZ': '[self.x_high_obs, self.y_low_obs, self.z_high_obs]', 'lineToXYZ': '[self.x_high_obs, self.y_high_obs, self.z_high_obs]'}), '(lineFromXYZ=[self.x_high_obs, self.y_low_obs, self.\n z_high_obs], lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])\n', (6135, 6267), True, 'import pybullet as p\n'), ((6808, 6866), 'pybullet.changeVisualShape', 'p.changeVisualShape', (['table_uid', '(-1)'], {'rgbaColor': '[1, 1, 1, 1]'}), '(table_uid, -1, rgbaColor=[1, 1, 1, 1])\n', (6827, 6866), True, 'import pybullet as p\n'), ((7436, 7464), 'pybullet.getNumJoints', 'p.getNumJoints', (['self.kuka_id'], {}), '(self.kuka_id)\n', (7450, 7464), True, 'import pybullet as p\n'), ((7831, 7849), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (7847, 7849), True, 'import pybullet as p\n'), ((7892, 8048), 'pybullet.getCameraImage', 'p.getCameraImage', ([], {'width': '(960)', 'height': '(960)', 'viewMatrix': 'self.view_matrix', 'projectionMatrix': 'self.projection_matrix', 'renderer': 'p.ER_BULLET_HARDWARE_OPENGL'}), '(width=960, height=960, viewMatrix=self.view_matrix,\n projectionMatrix=self.projection_matrix, renderer=p.\n ER_BULLET_HARDWARE_OPENGL)\n', (7908, 8048), True, 'import pybullet as p\n'), ((8205, 8318), 'pybullet.enableJointForceTorqueSensor', 'p.enableJointForceTorqueSensor', ([], 
{'bodyUniqueId': 'self.kuka_id', 'jointIndex': '(self.num_joints - 1)', 'enableSensor': '(True)'}), '(bodyUniqueId=self.kuka_id, jointIndex=self.\n num_joints - 1, enableSensor=True)\n', (8235, 8318), True, 'import pybullet as p\n'), ((9593, 9858), 'pybullet.calculateInverseKinematics', 'p.calculateInverseKinematics', ([], {'bodyUniqueId': 'self.kuka_id', 'endEffectorLinkIndex': '(self.num_joints - 1)', 'targetPosition': '[self.new_robot_pos[0], self.new_robot_pos[1], self.new_robot_pos[2]]', 'targetOrientation': 'self.orientation', 'jointDamping': 'self.joint_damping'}), '(bodyUniqueId=self.kuka_id,\n endEffectorLinkIndex=self.num_joints - 1, targetPosition=[self.\n new_robot_pos[0], self.new_robot_pos[1], self.new_robot_pos[2]],\n targetOrientation=self.orientation, jointDamping=self.joint_damping)\n', (9621, 9858), True, 'import pybullet as p\n'), ((10205, 10223), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (10221, 10223), True, 'import pybullet as p\n'), ((10978, 11017), 'math.sqrt', 'sqrt', (['(square_dx + square_dy + square_dz)'], {}), '(square_dx + square_dy + square_dz)\n', (10982, 11017), False, 'from math import sqrt\n'), ((11930, 12086), 'pybullet.getCameraImage', 'p.getCameraImage', ([], {'width': '(960)', 'height': '(960)', 'viewMatrix': 'self.view_matrix', 'projectionMatrix': 'self.projection_matrix', 'renderer': 'p.ER_BULLET_HARDWARE_OPENGL'}), '(width=960, height=960, viewMatrix=self.view_matrix,\n projectionMatrix=self.projection_matrix, renderer=p.\n ER_BULLET_HARDWARE_OPENGL)\n', (11946, 12086), True, 'import pybullet as p\n'), ((12489, 12503), 'pybullet.disconnect', 'p.disconnect', ([], {}), '()\n', (12501, 12503), True, 'import pybullet as p\n'), ((13388, 13495), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': "(skip, self.kFinalImageSize['width'], self.kFinalImageSize['height'])"}), "(low=0, high=1, shape=(skip, self.kFinalImageSize['width'], self.\n kFinalImageSize['height']))\n", (13398, 13495), False, 'from gym import spaces\n'), ((1176, 1192), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (1185, 1192), True, 'import pybullet as p\n'), ((1221, 1240), 'pybullet.connect', 'p.connect', (['p.DIRECT'], {}), '(p.DIRECT)\n', (1230, 1240), True, 'import pybullet as p\n'), ((6312, 6359), 'os.path.join', 'os.path.join', (['self.urdf_root_path', '"""plane.urdf"""'], {}), "(self.urdf_root_path, 'plane.urdf')\n", (6324, 6359), False, 'import os\n'), ((6444, 6501), 'os.path.join', 'os.path.join', (['self.urdf_root_path', '"""kuka_iiwa/model.urdf"""'], {}), "(self.urdf_root_path, 'kuka_iiwa/model.urdf')\n", (6456, 6501), False, 'import os\n'), ((6637, 6690), 'os.path.join', 'os.path.join', (['self.urdf_root_path', '"""table/table.urdf"""'], {}), "(self.urdf_root_path, 'table/table.urdf')\n", (6649, 6690), False, 'import os\n'), ((6904, 6966), 'os.path.join', 'os.path.join', (['self.urdf_root_path', '"""random_urdfs/000/000.urdf"""'], {}), "(self.urdf_root_path, 'random_urdfs/000/000.urdf')\n", (6916, 6966), False, 'import os\n'), ((7522, 7627), 'pybullet.resetJointState', 'p.resetJointState', ([], {'bodyUniqueId': 'self.kuka_id', 'jointIndex': 'i', 'targetValue': 'self.init_joint_positions[i]'}), '(bodyUniqueId=self.kuka_id, jointIndex=i, targetValue=self\n .init_joint_positions[i])\n', (7539, 7627), True, 'import pybullet as p\n'), ((7722, 7771), 'pybullet.getLinkState', 'p.getLinkState', (['self.kuka_id', '(self.num_joints - 1)'], {}), '(self.kuka_id, self.num_joints - 1)\n', (7736, 7771), True, 'import pybullet 
as p\n'), ((8425, 8472), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.object_id'], {}), '(self.object_id)\n', (8456, 8472), True, 'import pybullet as p\n'), ((8905, 8944), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (8917, 8944), False, 'import cv2\n'), ((9119, 9185), 'numpy.zeros', 'np.zeros', (["(1, self.kImageSize['width'], self.kImageSize['height'])"], {}), "((1, self.kImageSize['width'], self.kImageSize['height']))\n", (9127, 9185), True, 'import numpy as np\n'), ((9356, 9405), 'pybullet.getLinkState', 'p.getLinkState', (['self.kuka_id', '(self.num_joints - 1)'], {}), '(self.kuka_id, self.num_joints - 1)\n', (9370, 9405), True, 'import pybullet as p\n'), ((10027, 10133), 'pybullet.resetJointState', 'p.resetJointState', ([], {'bodyUniqueId': 'self.kuka_id', 'jointIndex': 'i', 'targetValue': 'self.robot_joint_positions[i]'}), '(bodyUniqueId=self.kuka_id, jointIndex=i, targetValue=self\n .robot_joint_positions[i])\n', (10044, 10133), True, 'import pybullet as p\n'), ((10324, 10340), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (10334, 10340), False, 'import time\n'), ((10511, 10560), 'pybullet.getLinkState', 'p.getLinkState', (['self.kuka_id', '(self.num_joints - 1)'], {}), '(self.kuka_id, self.num_joints - 1)\n', (10525, 10560), True, 'import pybullet as p\n'), ((14028, 14053), 'numpy.concatenate', 'np.concatenate', (['states', '(0)'], {}), '(states, 0)\n', (14042, 14053), True, 'import numpy as np\n'), ((4029, 4096), 'numpy.array', 'np.array', (['[self.x_low_action, self.y_low_action, self.z_low_action]'], {}), '([self.x_low_action, self.y_low_action, self.z_low_action])\n', (4037, 4096), True, 'import numpy as np\n'), ((4130, 4200), 'numpy.array', 'np.array', (['[self.x_high_action, self.y_high_action, self.z_high_action]'], {}), '([self.x_high_action, self.y_high_action, self.z_high_action])\n', (4138, 4200), True, 'import numpy as np\n'), ((12578, 12652), 'pybullet.getJointState', 'p.getJointState', ([], {'bodyUniqueId': 'self.kuka_id', 'jointIndex': '(self.num_joints - 1)'}), '(bodyUniqueId=self.kuka_id, jointIndex=self.num_joints - 1)\n', (12593, 12652), True, 'import pybullet as p\n'), ((7111, 7158), 'random.uniform', 'random.uniform', (['self.x_low_obs', 'self.x_high_obs'], {}), '(self.x_low_obs, self.x_high_obs)\n', (7125, 7158), False, 'import random\n'), ((7257, 7304), 'random.uniform', 'random.uniform', (['self.y_low_obs', 'self.y_high_obs'], {}), '(self.y_low_obs, self.y_high_obs)\n', (7271, 7304), False, 'import random\n'), ((8966, 9038), 'cv2.resize', 'cv2.resize', (['image', "(self.kImageSize['width'], self.kImageSize['height'])"], {}), "(image, (self.kImageSize['width'], self.kImageSize['height']))\n", (8976, 9038), False, 'import cv2\n'), ((10618, 10665), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.object_id'], {}), '(self.object_id)\n', (10649, 10665), True, 'import pybullet as p\n')] |
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import altair as alt
import vega_datasets
alt.data_transformers.enable('default')
alt.data_transformers.disable_max_rows()
app = dash.Dash(__name__, assets_folder='assets', external_stylesheets=[dbc.themes.BOOTSTRAP])
# Boostrap CSS.
app.css.append_css({'external_url': 'https://codepen.io/amyoshino/pen/jzXypZ.css'}) # noqa: E501
server = app.server
app.title = 'Dash app with pure Altair HTML'
df = pd.read_csv('data/Police_Department_Incidents_-_Previous_Year__2016_.csv')
# df = pd.read_csv("https://raw.github.ubc.ca/MDS-2019-20/DSCI_531_lab4_anas017/master/data/Police_Department_Incidents_-_Previous_Year__2016_.csv?token=<PASSWORD>%3D")
df['datetime'] = pd.to_datetime(df[["Date","Time"]].apply(lambda x: x[0].split()[0] +" "+x[1], axis=1), format="%m/%d/%Y %H:%M")
df['hour'] = df['datetime'].dt.hour
df.dropna(inplace=True)
top_4_crimes = df['Category'].value_counts()[:6].index.to_list()
top_4_crimes
top_4_crimes.remove("NON-CRIMINAL")
top_4_crimes.remove("OTHER OFFENSES")
# top 4 crimes df subset
df_t4 = df[df["Category"].isin(top_4_crimes)].copy()
def make_plot_top(df_new=df_t4):
# Create a plot of the Displacement and the Horsepower of the cars dataset
# making the slider
slider = alt.binding_range(min = 0, max = 23, step = 1)
select_hour = alt.selection_single(name='select', fields = ['hour'],
bind = slider, init={'hour': 0})
#begin of my code
# typeDict = {'ASSAULT':'quantitative',
# 'VANDALISM':'quantitative',
# 'LARCENY/THEFT':'quantitative',
# 'VEHICLE THEFT':'quantitative'
# }
# end
chart = alt.Chart(df_new).mark_bar(size=30).encode(
x=alt.X('Category',type='nominal', title='Category'),
y=alt.Y('count()', title = "Count" , scale = alt.Scale(domain = (0,3300))),
tooltip='count()'
).properties(
title = "Per hour crime occurrences for the top 4 crimes",
width=500,
height = 315
).add_selection(
select_hour
).transform_filter(
select_hour
)
return chart
def make_plot_bot(data=df_t4):
chart_1 = alt.Chart(data).mark_circle(size=3, opacity = 0.8).encode(
longitude='X:Q',
latitude='Y:Q',
color = alt.Color('PdDistrict:N', legend = alt.Legend(title = "District")),
tooltip = 'PdDistrict'
).project(
type='albersUsa'
).properties(
width=450,
height=350
)
chart_2 = alt.Chart(data).mark_bar().encode(
x=alt.X('PdDistrict:N', axis=None, title="District"),
y=alt.Y('count()', title="Count of reports"),
color=alt.Color('PdDistrict:N', legend=alt.Legend(title="District")),
tooltip=['PdDistrict', 'count()']
).properties(
width=450,
height=350
)
# A dropdown filter
crimes_dropdown = alt.binding_select(options=list(data['Category'].unique()))
crimes_select = alt.selection_single(fields=['Category'], bind=crimes_dropdown,
name="Pick\ Crime")
combine_chart = (chart_2 | chart_1)
filter_crimes = combine_chart.add_selection(
crimes_select
).transform_filter(
crimes_select
)
return filter_crimes
body = dbc.Container(
[
dbc.Row(
[
dbc.Col(
[
html.H2("San Francisco Crime"),
html.P(
"""\
When looking for a place to live or visit, one important factor that people will consider
is the safety of the neighborhood. Searching that information district
by district could be time consuming and exhausting. It is even more difficult to
compare specific crime statistics across districts such as the crime rate
at a certain time of day. It would be useful if people can look up crime
related information across district on one application. Our app
aims to help people make decisions when considering their next trip or move to San Francisco, California
via visually exploring a dataset of crime statistics. The app provides an overview of the crime rate across
neighborhoods and allows users to focus on more specific information through
filtering of geological location, crime rate, crime type or time of the
crime.
Use the box below to choose crimes of interest.
"""
),
dcc.Dropdown(
id = 'drop_selection_crime',
options=[{'label': i, 'value': i} for i in df_t4['Category'].unique()
],
style={'height': '20px',
'width': '400px'},
value=df_t4['Category'].unique(),
multi=True)
],
md=5,
),
dbc.Col(
[
dbc.Row(
[
html.Iframe(
sandbox = "allow-scripts",
id = "plot_top",
height = "500",
width = "650",
style = {"border-width": "0px"},
srcDoc = make_plot_top().to_html()
)
]
)
]
),
]
),
dbc.Row(
html.Iframe(
sandbox='allow-scripts',
id='plot_bot',
height='500',
width='1200',
style={'border-width': '0px'},
srcDoc= make_plot_bot().to_html()
)
)
],
className="mt-4",
)
app.layout = html.Div(body)
@app.callback([dash.dependencies.Output('plot_top', 'srcDoc'),
dash.dependencies.Output('plot_bot', 'srcDoc')],
[dash.dependencies.Input('drop_selection_crime', 'value')]
)
def update_df(chosen):
new_df = df_t4[(df_t4["Category"].isin(chosen))]
updated_plot_top = make_plot_top(new_df).to_html()
updated_plot_bottom = make_plot_bot(new_df).to_html()
return updated_plot_top, updated_plot_bottom
if __name__ == '__main__':
app.run_server(debug=False) | [
"altair.selection_single",
"pandas.read_csv",
"altair.binding_range",
"dash_html_components.P",
"dash.dependencies.Output",
"altair.Chart",
"altair.Scale",
"dash.dependencies.Input",
"altair.data_transformers.enable",
"altair.X",
"altair.Y",
"altair.Legend",
"dash_html_components.H2",
"dash.Dash",
"altair.data_transformers.disable_max_rows",
"dash_html_components.Div"
]
| [((205, 244), 'altair.data_transformers.enable', 'alt.data_transformers.enable', (['"""default"""'], {}), "('default')\n", (233, 244), True, 'import altair as alt\n'), ((245, 285), 'altair.data_transformers.disable_max_rows', 'alt.data_transformers.disable_max_rows', ([], {}), '()\n', (283, 285), True, 'import altair as alt\n'), ((292, 385), 'dash.Dash', 'dash.Dash', (['__name__'], {'assets_folder': '"""assets"""', 'external_stylesheets': '[dbc.themes.BOOTSTRAP]'}), "(__name__, assets_folder='assets', external_stylesheets=[dbc.\n themes.BOOTSTRAP])\n", (301, 385), False, 'import dash\n'), ((566, 640), 'pandas.read_csv', 'pd.read_csv', (['"""data/Police_Department_Incidents_-_Previous_Year__2016_.csv"""'], {}), "('data/Police_Department_Incidents_-_Previous_Year__2016_.csv')\n", (577, 640), True, 'import pandas as pd\n'), ((6510, 6524), 'dash_html_components.Div', 'html.Div', (['body'], {}), '(body)\n', (6518, 6524), True, 'import dash_html_components as html\n'), ((1390, 1430), 'altair.binding_range', 'alt.binding_range', ([], {'min': '(0)', 'max': '(23)', 'step': '(1)'}), '(min=0, max=23, step=1)\n', (1407, 1430), True, 'import altair as alt\n'), ((1455, 1543), 'altair.selection_single', 'alt.selection_single', ([], {'name': '"""select"""', 'fields': "['hour']", 'bind': 'slider', 'init': "{'hour': 0}"}), "(name='select', fields=['hour'], bind=slider, init={\n 'hour': 0})\n", (1475, 1543), True, 'import altair as alt\n'), ((3121, 3210), 'altair.selection_single', 'alt.selection_single', ([], {'fields': "['Category']", 'bind': 'crimes_dropdown', 'name': '"""Pick\\\\ Crime"""'}), "(fields=['Category'], bind=crimes_dropdown, name=\n 'Pick\\\\ Crime')\n", (3141, 3210), True, 'import altair as alt\n'), ((6541, 6587), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""plot_top"""', '"""srcDoc"""'], {}), "('plot_top', 'srcDoc')\n", (6565, 6587), False, 'import dash\n'), ((6593, 6639), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""plot_bot"""', '"""srcDoc"""'], {}), "('plot_bot', 'srcDoc')\n", (6617, 6639), False, 'import dash\n'), ((6647, 6703), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""drop_selection_crime"""', '"""value"""'], {}), "('drop_selection_crime', 'value')\n", (6670, 6703), False, 'import dash\n'), ((2706, 2756), 'altair.X', 'alt.X', (['"""PdDistrict:N"""'], {'axis': 'None', 'title': '"""District"""'}), "('PdDistrict:N', axis=None, title='District')\n", (2711, 2756), True, 'import altair as alt\n'), ((2768, 2810), 'altair.Y', 'alt.Y', (['"""count()"""'], {'title': '"""Count of reports"""'}), "('count()', title='Count of reports')\n", (2773, 2810), True, 'import altair as alt\n'), ((3575, 3605), 'dash_html_components.H2', 'html.H2', (['"""San Francisco Crime"""'], {}), "('San Francisco Crime')\n", (3582, 3605), True, 'import dash_html_components as html\n'), ((3631, 4890), 'dash_html_components.P', 'html.P', (['""" When looking for a place to live or visit, one important factor that people will consider\n is the safety of the neighborhood. Searching that information district\n by district could be time consuming and exhausting. It is even more difficult to\n compare specific crime statistics across districts such as the crime rate\n at a certain time of day. It would be useful if people can look up crime\n related information across district on one application. Our app\n aims to help people make decisions when considering their next trip or move to San Francisco, California\n via visually exploring a dataset of crime statistics. 
The app provides an overview of the crime rate across\n neighborhoods and allows users to focus on more specific information through\n filtering of geological location, crime rate, crime type or time of the\n crime.\n\n Use the box below to choose crimes of interest.\n """'], {}), '(\n """ When looking for a place to live or visit, one important factor that people will consider\n is the safety of the neighborhood. Searching that information district\n by district could be time consuming and exhausting. It is even more difficult to\n compare specific crime statistics across districts such as the crime rate\n at a certain time of day. It would be useful if people can look up crime\n related information across district on one application. Our app\n aims to help people make decisions when considering their next trip or move to San Francisco, California\n via visually exploring a dataset of crime statistics. The app provides an overview of the crime rate across\n neighborhoods and allows users to focus on more specific information through\n filtering of geological location, crime rate, crime type or time of the\n crime.\n\n Use the box below to choose crimes of interest.\n """\n )\n', (3637, 4890), True, 'import dash_html_components as html\n'), ((2661, 2676), 'altair.Chart', 'alt.Chart', (['data'], {}), '(data)\n', (2670, 2676), True, 'import altair as alt\n'), ((2859, 2887), 'altair.Legend', 'alt.Legend', ([], {'title': '"""District"""'}), "(title='District')\n", (2869, 2887), True, 'import altair as alt\n'), ((1880, 1931), 'altair.X', 'alt.X', (['"""Category"""'], {'type': '"""nominal"""', 'title': '"""Category"""'}), "('Category', type='nominal', title='Category')\n", (1885, 1931), True, 'import altair as alt\n'), ((2321, 2336), 'altair.Chart', 'alt.Chart', (['data'], {}), '(data)\n', (2330, 2336), True, 'import altair as alt\n'), ((2480, 2508), 'altair.Legend', 'alt.Legend', ([], {'title': '"""District"""'}), "(title='District')\n", (2490, 2508), True, 'import altair as alt\n'), ((1826, 1843), 'altair.Chart', 'alt.Chart', (['df_new'], {}), '(df_new)\n', (1835, 1843), True, 'import altair as alt\n'), ((1985, 2012), 'altair.Scale', 'alt.Scale', ([], {'domain': '(0, 3300)'}), '(domain=(0, 3300))\n', (1994, 2012), True, 'import altair as alt\n')] |
import os
import pdb
import warnings
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
import torch.optim as optim
import dataloaders
from utils.utils import AverageMeter
from utils.loss import build_criterion
from utils.metrics import Evaluator
from utils.step_lr_scheduler import Iter_LR_Scheduler
from retrain_model.build_autodeeplab import Retrain_Autodeeplab
from config_utils.re_train_autodeeplab import obtain_retrain_autodeeplab_args
def main():
warnings.filterwarnings('ignore')
assert torch.cuda.is_available()
torch.backends.cudnn.benchmark = True
args = obtain_retrain_autodeeplab_args()
save_dir = os.path.join('./data/', args.save_path)
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
model_fname = os.path.join(save_dir,
'deeplab_{0}_{1}_v3_{2}_epoch%d.pth'.format(args.backbone, args.dataset, args.exp))
record_name = os.path.join(save_dir, 'training_record.txt')
if args.dataset == 'pascal':
raise NotImplementedError
elif args.dataset == 'cityscapes':
kwargs = {'num_workers': args.workers, 'pin_memory': True, 'drop_last': True}
dataset_loader, num_classes, val_loader = dataloaders.make_data_loader(args, **kwargs)
args.num_classes = num_classes
else:
raise ValueError('Unknown dataset: {}'.format(args.dataset))
if args.backbone == 'autodeeplab':
model = Retrain_Autodeeplab(args)
else:
raise ValueError('Unknown backbone: {}'.format(args.backbone))
if args.criterion == 'Ohem':
args.thresh = 0.7
args.crop_size = [args.crop_size, args.crop_size] if isinstance(args.crop_size, int) else args.crop_size
args.n_min = int((args.batch_size / len(args.gpu) * args.crop_size[0] * args.crop_size[1]) // 16)
criterion = build_criterion(args)
model = nn.DataParallel(model).cuda()
model.train()
if args.freeze_bn:
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
m.weight.requires_grad = False
m.bias.requires_grad = False
optimizer = optim.SGD(model.module.parameters(), lr=args.base_lr, momentum=0.9, weight_decay=0.0001)
max_iteration = len(dataset_loader) * args.epochs
scheduler = Iter_LR_Scheduler(args, max_iteration, len(dataset_loader))
start_epoch = 0
evaluator=Evaluator(num_classes)
if args.resume:
if os.path.isfile(args.resume):
print('=> loading checkpoint {0}'.format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('=> loaded checkpoint {0} (epoch {1})'.format(args.resume, checkpoint['epoch']))
else:
raise ValueError('=> no checkpoint found at {0}'.format(args.resume))
for epoch in range(start_epoch, args.epochs):
losses = AverageMeter()
print('Training epoch {}'.format(epoch))
model.train()
for i, sample in enumerate(dataset_loader):
cur_iter = epoch * len(dataset_loader) + i
scheduler(optimizer, cur_iter)
inputs = sample['image'].cuda()
target = sample['label'].cuda()
outputs = model(inputs)
loss = criterion(outputs, target)
if np.isnan(loss.item()) or np.isinf(loss.item()):
pdb.set_trace()
losses.update(loss.item(), args.batch_size)
loss.backward()
optimizer.step()
optimizer.zero_grad()
if (i + 1) % 200 == 0:
print('epoch: {0}\t''iter: {1}/{2}\t''lr: {3:.6f}\t''loss: {loss.val:.4f} ({loss.ema:.4f})'.format(
epoch + 1, i + 1, len(dataset_loader), scheduler.get_lr(optimizer), loss=losses))
if epoch < args.epochs:
if (epoch+1) % 5 == 0:
torch.save({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, model_fname % (epoch + 1))
else:
torch.save({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, model_fname % (epoch + 1))
line0 = 'epoch: {0}\t''loss: {loss.val:.4f} ({loss.ema:.4f})'.format(
epoch, loss=losses)
with open(record_name, 'a') as f:
f.write(line0)
if line0[-1] != '\n':
f.write('\n')
if epoch%3!=0 and epoch <args.epochs-20:
continue
print('Validate epoch {}'.format(epoch))
model.eval()
evaluator.reset()
test_loss=0.0
for i,sample in enumerate(val_loader):
inputs = sample['image'].cuda()
target = sample['label'].cuda()
with torch.no_grad():
outputs = model(inputs)
# loss = criterion(outputs, target)
# test_loss+=loss.item()
pred=outputs.data.cpu().numpy()
target=target.cpu().numpy()
pred = np.argmax(pred, axis=1)
evaluator.add_batch(target,pred)
Acc = evaluator.Pixel_Accuracy()
Acc_class = evaluator.Pixel_Accuracy_Class()
mIoU = evaluator.Mean_Intersection_over_Union()
FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
print("epoch: {}\t Acc:{:.3f}, Acc_class:{:.3f}, mIoU:{:.3f}, fwIoU: {:.3f}".format(epoch,Acc, Acc_class, mIoU, FWIoU))
line1='epoch: {}\t''mIoU: {:.3f}'.format(epoch,mIoU)
with open(record_name, 'a') as f:
f.write(line1)
if line1[-1] != '\n':
f.write('\n')
if __name__ == "__main__":
main()
| [
"retrain_model.build_autodeeplab.Retrain_Autodeeplab",
"torch.load",
"os.path.join",
"torch.nn.DataParallel",
"numpy.argmax",
"dataloaders.make_data_loader",
"os.path.isfile",
"torch.cuda.is_available",
"os.path.isdir",
"os.mkdir",
"utils.utils.AverageMeter",
"pdb.set_trace",
"torch.no_grad",
"config_utils.re_train_autodeeplab.obtain_retrain_autodeeplab_args",
"utils.loss.build_criterion",
"warnings.filterwarnings",
"utils.metrics.Evaluator"
]
| [((518, 551), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (541, 551), False, 'import warnings\n'), ((563, 588), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (586, 588), False, 'import torch\n'), ((642, 675), 'config_utils.re_train_autodeeplab.obtain_retrain_autodeeplab_args', 'obtain_retrain_autodeeplab_args', ([], {}), '()\n', (673, 675), False, 'from config_utils.re_train_autodeeplab import obtain_retrain_autodeeplab_args\n'), ((691, 730), 'os.path.join', 'os.path.join', (['"""./data/"""', 'args.save_path'], {}), "('./data/', args.save_path)\n", (703, 730), False, 'import os\n'), ((968, 1013), 'os.path.join', 'os.path.join', (['save_dir', '"""training_record.txt"""'], {}), "(save_dir, 'training_record.txt')\n", (980, 1013), False, 'import os\n'), ((1877, 1898), 'utils.loss.build_criterion', 'build_criterion', (['args'], {}), '(args)\n', (1892, 1898), False, 'from utils.loss import build_criterion\n'), ((2451, 2473), 'utils.metrics.Evaluator', 'Evaluator', (['num_classes'], {}), '(num_classes)\n', (2460, 2473), False, 'from utils.metrics import Evaluator\n'), ((742, 765), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (755, 765), False, 'import os\n'), ((775, 793), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (783, 793), False, 'import os\n'), ((1475, 1500), 'retrain_model.build_autodeeplab.Retrain_Autodeeplab', 'Retrain_Autodeeplab', (['args'], {}), '(args)\n', (1494, 1500), False, 'from retrain_model.build_autodeeplab import Retrain_Autodeeplab\n'), ((2506, 2533), 'os.path.isfile', 'os.path.isfile', (['args.resume'], {}), '(args.resume)\n', (2520, 2533), False, 'import os\n'), ((3083, 3097), 'utils.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3095, 3097), False, 'from utils.utils import AverageMeter\n'), ((1256, 1300), 'dataloaders.make_data_loader', 'dataloaders.make_data_loader', (['args'], {}), '(args, **kwargs)\n', (1284, 1300), False, 'import dataloaders\n'), ((1912, 1934), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (1927, 1934), True, 'import torch.nn as nn\n'), ((2627, 2650), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (2637, 2650), False, 'import torch\n'), ((5329, 5352), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (5338, 5352), True, 'import numpy as np\n'), ((3568, 3583), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3581, 3583), False, 'import pdb\n'), ((5084, 5099), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5097, 5099), False, 'import torch\n')] |
# The MIT License (MIT)
# Copyright (c) 2021 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import machine
from pmu import axp192
from context import Context
from login import Login
from home import Home
import settings
pmu = axp192()
# Enable power management so that if power button is held down 6 secs,
# it shuts off as expected
pmu.enablePMICSleepMode(True)
ctx = Context()
ctx.display.flash_text(settings.load('splash', ( 'Krux' ), strip=False))
while True:
if not Login(ctx).run():
break
if not Home(ctx).run():
break
ctx.display.flash_text(( 'Shutting down..' ))
ctx.clear()
pmu.setEnterSleepMode()
machine.reset()
| [
"home.Home",
"login.Login",
"context.Context",
"pmu.axp192",
"machine.reset",
"settings.load"
]
| [((1242, 1250), 'pmu.axp192', 'axp192', ([], {}), '()\n', (1248, 1250), False, 'from pmu import axp192\n'), ((1387, 1396), 'context.Context', 'Context', ([], {}), '()\n', (1394, 1396), False, 'from context import Context\n'), ((1655, 1670), 'machine.reset', 'machine.reset', ([], {}), '()\n', (1668, 1670), False, 'import machine\n'), ((1421, 1465), 'settings.load', 'settings.load', (['"""splash"""', '"""Krux"""'], {'strip': '(False)'}), "('splash', 'Krux', strip=False)\n", (1434, 1465), False, 'import settings\n'), ((1495, 1505), 'login.Login', 'Login', (['ctx'], {}), '(ctx)\n', (1500, 1505), False, 'from login import Login\n'), ((1539, 1548), 'home.Home', 'Home', (['ctx'], {}), '(ctx)\n', (1543, 1548), False, 'from home import Home\n')] |
## Program: VMTK
## Language: Python
## Date: January 12, 2018
## Version: 1.4
## Copyright (c) <NAME>, <NAME>, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## <NAME> (Github @rlizzo)
## University at Buffalo
import pytest
import vmtk.vmtksurfaceconnectivity as connectivity
import os
@pytest.fixture(scope='module')
def aorta_surface_two_segments(input_datadir):
import vmtk.vmtksurfacereader as surfacereader
reader = surfacereader.vmtkSurfaceReader()
reader.InputFileName = os.path.join(input_datadir, 'aorta-surface-two-segments.vtp')
reader.Execute()
return reader.Surface
def test_extract_largest_surface(aorta_surface_two_segments, compare_surfaces):
name = __name__ + '_test_extract_largest_surface.vtp'
connectiv = connectivity.vmtkSurfaceConnectivity()
connectiv.Surface = aorta_surface_two_segments
connectiv.Method = 'largest'
connectiv.CleanOutput = 1
connectiv.Execute()
assert compare_surfaces(connectiv.Surface, name) == True
def test_extract_closest_to_reference_surface(aorta_surface_two_segments, aorta_surface_reference, compare_surfaces):
name = __name__ + '_test_extract_closest_to_reference_surface.vtp'
connectiv = connectivity.vmtkSurfaceConnectivity()
connectiv.Surface = aorta_surface_two_segments
connectiv.Method = 'closest'
connectiv.ReferenceSurface = aorta_surface_reference
connectiv.Execute()
assert compare_surfaces(connectiv.Surface, name) == True
def test_extract_closest_to_point(aorta_surface_two_segments, compare_surfaces):
name = __name__ + '_test_extract_closest_to_point.vtp'
connectiv = connectivity.vmtkSurfaceConnectivity()
connectiv.Surface = aorta_surface_two_segments
connectiv.Method = 'closest'
connectiv.ClosestPoint = [0.0, 0.0, 0.0]
connectiv.Execute()
assert compare_surfaces(connectiv.Surface, name) == True
| [
"pytest.fixture",
"vmtk.vmtksurfacereader.vmtkSurfaceReader",
"vmtk.vmtksurfaceconnectivity.vmtkSurfaceConnectivity",
"os.path.join"
]
| [((583, 613), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (597, 613), False, 'import pytest\n'), ((725, 758), 'vmtk.vmtksurfacereader.vmtkSurfaceReader', 'surfacereader.vmtkSurfaceReader', ([], {}), '()\n', (756, 758), True, 'import vmtk.vmtksurfacereader as surfacereader\n'), ((786, 847), 'os.path.join', 'os.path.join', (['input_datadir', '"""aorta-surface-two-segments.vtp"""'], {}), "(input_datadir, 'aorta-surface-two-segments.vtp')\n", (798, 847), False, 'import os\n'), ((1051, 1089), 'vmtk.vmtksurfaceconnectivity.vmtkSurfaceConnectivity', 'connectivity.vmtkSurfaceConnectivity', ([], {}), '()\n', (1087, 1089), True, 'import vmtk.vmtksurfaceconnectivity as connectivity\n'), ((1497, 1535), 'vmtk.vmtksurfaceconnectivity.vmtkSurfaceConnectivity', 'connectivity.vmtkSurfaceConnectivity', ([], {}), '()\n', (1533, 1535), True, 'import vmtk.vmtksurfaceconnectivity as connectivity\n'), ((1921, 1959), 'vmtk.vmtksurfaceconnectivity.vmtkSurfaceConnectivity', 'connectivity.vmtkSurfaceConnectivity', ([], {}), '()\n', (1957, 1959), True, 'import vmtk.vmtksurfaceconnectivity as connectivity\n')] |
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from mne import pick_types
from mne.datasets import testing
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.cnt import read_raw_cnt
from mne.annotations import read_annotations
data_path = testing.data_path(download=False)
fname = op.join(data_path, 'CNT', 'scan41_short.cnt')
@testing.requires_testing_data
def test_data():
"""Test reading raw cnt files."""
with pytest.warns(RuntimeWarning, match='number of bytes'):
raw = _test_raw_reader(read_raw_cnt, input_fname=fname,
eog='auto', misc=['NA1', 'LEFT_EAR'])
# make sure we use annotations event if we synthesized stim
assert len(raw.annotations) == 6
eog_chs = pick_types(raw.info, eog=True, exclude=[])
assert len(eog_chs) == 2 # test eog='auto'
assert raw.info['bads'] == ['LEFT_EAR', 'VEOGR'] # test bads
# the data has "05/10/200 17:35:31" so it is set to None
assert raw.info['meas_date'] is None
@testing.requires_testing_data
def test_compare_events_and_annotations():
"""Test comparing annotations and events."""
with pytest.warns(RuntimeWarning, match='Could not parse meas date'):
raw = read_raw_cnt(fname)
events = np.array([[333, 0, 7],
[1010, 0, 7],
[1664, 0, 109],
[2324, 0, 7],
[2984, 0, 109]])
annot = read_annotations(fname)
assert len(annot) == 6
assert_array_equal(annot.onset[:-1], events[:, 0] / raw.info['sfreq'])
assert 'STI 014' not in raw.info['ch_names']
| [
"mne.datasets.testing.data_path",
"mne.pick_types",
"mne.io.cnt.read_raw_cnt",
"os.path.join",
"mne.io.tests.test_raw._test_raw_reader",
"pytest.warns",
"numpy.array",
"numpy.testing.assert_array_equal",
"mne.annotations.read_annotations"
]
| [((388, 421), 'mne.datasets.testing.data_path', 'testing.data_path', ([], {'download': '(False)'}), '(download=False)\n', (405, 421), False, 'from mne.datasets import testing\n'), ((430, 475), 'os.path.join', 'op.join', (['data_path', '"""CNT"""', '"""scan41_short.cnt"""'], {}), "(data_path, 'CNT', 'scan41_short.cnt')\n", (437, 475), True, 'import os.path as op\n'), ((878, 920), 'mne.pick_types', 'pick_types', (['raw.info'], {'eog': '(True)', 'exclude': '[]'}), '(raw.info, eog=True, exclude=[])\n', (888, 920), False, 'from mne import pick_types\n'), ((1384, 1471), 'numpy.array', 'np.array', (['[[333, 0, 7], [1010, 0, 7], [1664, 0, 109], [2324, 0, 7], [2984, 0, 109]]'], {}), '([[333, 0, 7], [1010, 0, 7], [1664, 0, 109], [2324, 0, 7], [2984, 0,\n 109]])\n', (1392, 1471), True, 'import numpy as np\n'), ((1573, 1596), 'mne.annotations.read_annotations', 'read_annotations', (['fname'], {}), '(fname)\n', (1589, 1596), False, 'from mne.annotations import read_annotations\n'), ((1628, 1698), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['annot.onset[:-1]', "(events[:, 0] / raw.info['sfreq'])"], {}), "(annot.onset[:-1], events[:, 0] / raw.info['sfreq'])\n", (1646, 1698), False, 'from numpy.testing import assert_array_equal\n'), ((573, 626), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {'match': '"""number of bytes"""'}), "(RuntimeWarning, match='number of bytes')\n", (585, 626), False, 'import pytest\n'), ((642, 733), 'mne.io.tests.test_raw._test_raw_reader', '_test_raw_reader', (['read_raw_cnt'], {'input_fname': 'fname', 'eog': '"""auto"""', 'misc': "['NA1', 'LEFT_EAR']"}), "(read_raw_cnt, input_fname=fname, eog='auto', misc=['NA1',\n 'LEFT_EAR'])\n", (658, 733), False, 'from mne.io.tests.test_raw import _test_raw_reader\n'), ((1272, 1335), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {'match': '"""Could not parse meas date"""'}), "(RuntimeWarning, match='Could not parse meas date')\n", (1284, 1335), False, 'import pytest\n'), ((1351, 1370), 'mne.io.cnt.read_raw_cnt', 'read_raw_cnt', (['fname'], {}), '(fname)\n', (1363, 1370), False, 'from mne.io.cnt import read_raw_cnt\n')] |
import urllib.request, json
import pandas as pd
baseUrl = 'https://avoindata.eduskunta.fi/api/v1/tables/VaskiData'
parameters = 'rows?columnName=Eduskuntatunnus&columnValue=LA%25&perPage=100'
page = 0
df = None
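# Page through the API until it reports hasMore == False, appending each page's rows to the dataframe.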
while True:
print(f'Fetching page number {page}')
with urllib.request.urlopen(f'{baseUrl}/{parameters}&page={page}') as url:
data = json.loads(url.read().decode())
if page == 0:
columns = data['columnNames']
df = pd.DataFrame(columns=columns)
dataRows = data['rowData']
df = df.append(pd.DataFrame(dataRows, columns=data['columnNames']), ignore_index=True)
if data['hasMore'] == False:
break
page = page + 1
df.to_csv('./data/parliament_proposals_raw.csv', sep=';', encoding='utf-8') | [
"pandas.DataFrame"
]
| [((474, 503), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (486, 503), True, 'import pandas as pd\n'), ((563, 614), 'pandas.DataFrame', 'pd.DataFrame', (['dataRows'], {'columns': "data['columnNames']"}), "(dataRows, columns=data['columnNames'])\n", (575, 614), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 20 16:15:37 2021
@author: em42363
"""
# In[1]: Import functions
'''
CatBoost is a high-performance open source library for gradient boosting
on decision trees
'''
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
import pandas as pd
import seaborn as sns
import numpy as np
import os
os.chdir(os.path.dirname(__file__))
import sys
sys.path.insert(0, r'C:\Users\eduar\OneDrive\PhD\UTuning')
sys.path.insert(0, r'C:\Users\em42363\OneDrive\PhD\UTuning')
from UTuning import scorer, plots
#df = pd.read_csv(r'C:\Users\eduar\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')
df = pd.read_csv(r'C:\Users\em42363\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')
import random
import matplotlib.pyplot as plt
# In[1]: Split train test
'''
Perform split train test
'''
y = df['Production'].values
X = df[['Por', 'LogPerm', 'Brittle', 'TOC']].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
# In[6]: Regressor
'''
Define the regressor, fit the model and predict the estimates
'''
model = CatBoostRegressor(iterations=1000, learning_rate=0.2, loss_function='RMSEWithUncertainty',
verbose=False, random_seed=0)
model.fit(X_train, y_train)
estimates = model.predict(X_test)
# In[9]: Plot error line
'''
Use UTuning to plot error lines
'''
plots.error_line(estimates[:, 0], y_test, np.sqrt(estimates[:, 1]), Frac=1)
# %% Define the virtual ensemble
def virt_ensemble(X_train,y_train, num_samples=100, iters=1000, lr=0.1): # 100, .1
ens_preds = []
model = CatBoostRegressor(iterations=iters, learning_rate=lr, loss_function='RMSEWithUncertainty',
verbose=False, random_seed=1)
model.fit(X_train,y_train)
ens_preds = model.virtual_ensembles_predict(X_test, prediction_type='VirtEnsembles',
virtual_ensembles_count=num_samples,
thread_count=8)
return np.asarray(ens_preds)
# %%
n_quantiles = 11
perc = np.linspace(0.0, 1.00, n_quantiles)
Samples = 10
ens_preds=virt_ensemble(X_train,y_train, num_samples=Samples)
Pred_array = ens_preds[:,:,0]
Knowledge_u=np.sqrt(np.var(Pred_array,axis=1)) #Knowledge uncertainty
Data_u=np.sqrt(np.mean(ens_preds[:,:,1],axis=1)) #Data uncertainty
Sigma=Knowledge_u+Data_u
# %%
'''
We use UTuning to return the Indicator Function and plot the
accuracy plot and diagnose our model.
'''
scorer = scorer.scorer(Pred_array, y_test, Sigma)
IF_array = scorer.IndicatorFunction()
avgIF = np.mean(IF_array,axis=0)
# % Second plot test
plots.error_accuracy_plot(perc,IF_array,Pred_array,y_test,Sigma)
# %
print('Accuracy = {0:2.2f}'.format(scorer.Accuracy()))
print('Precision = {0:2.2f}'.format(scorer.Precision()))
print('Goodness = {0:2.2f}'.format(scorer.Goodness()))
| [
"numpy.mean",
"sys.path.insert",
"numpy.sqrt",
"UTuning.scorer.Precision",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"UTuning.scorer.Accuracy",
"numpy.asarray",
"UTuning.plots.error_accuracy_plot",
"catboost.CatBoostRegressor",
"os.path.dirname",
"numpy.linspace",
"UTuning.scorer.Goodness",
"UTuning.scorer.scorer",
"UTuning.scorer.IndicatorFunction",
"numpy.var"
]
| [((425, 487), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""C:\\\\Users\\\\eduar\\\\OneDrive\\\\PhD\\\\UTuning"""'], {}), "(0, 'C:\\\\Users\\\\eduar\\\\OneDrive\\\\PhD\\\\UTuning')\n", (440, 487), False, 'import sys\n'), ((484, 548), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""C:\\\\Users\\\\em42363\\\\OneDrive\\\\PhD\\\\UTuning"""'], {}), "(0, 'C:\\\\Users\\\\em42363\\\\OneDrive\\\\PhD\\\\UTuning')\n", (499, 548), False, 'import sys\n'), ((666, 752), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\em42363\\\\OneDrive\\\\PhD\\\\UTuning\\\\dataset\\\\unconv_MV.csv"""'], {}), "(\n 'C:\\\\Users\\\\em42363\\\\OneDrive\\\\PhD\\\\UTuning\\\\dataset\\\\unconv_MV.csv')\n", (677, 752), True, 'import pandas as pd\n'), ((965, 1003), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.33)'}), '(X, y, test_size=0.33)\n', (981, 1003), False, 'from sklearn.model_selection import train_test_split\n'), ((1103, 1228), 'catboost.CatBoostRegressor', 'CatBoostRegressor', ([], {'iterations': '(1000)', 'learning_rate': '(0.2)', 'loss_function': '"""RMSEWithUncertainty"""', 'verbose': '(False)', 'random_seed': '(0)'}), "(iterations=1000, learning_rate=0.2, loss_function=\n 'RMSEWithUncertainty', verbose=False, random_seed=0)\n", (1120, 1228), False, 'from catboost import CatBoostRegressor\n'), ((2098, 2132), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'n_quantiles'], {}), '(0.0, 1.0, n_quantiles)\n', (2109, 2132), True, 'import numpy as np\n'), ((2527, 2567), 'UTuning.scorer.scorer', 'scorer.scorer', (['Pred_array', 'y_test', 'Sigma'], {}), '(Pred_array, y_test, Sigma)\n', (2540, 2567), False, 'from UTuning import scorer, plots\n'), ((2580, 2606), 'UTuning.scorer.IndicatorFunction', 'scorer.IndicatorFunction', ([], {}), '()\n', (2604, 2606), False, 'from UTuning import scorer, plots\n'), ((2615, 2640), 'numpy.mean', 'np.mean', (['IF_array'], {'axis': '(0)'}), '(IF_array, axis=0)\n', (2622, 2640), True, 'import numpy as np\n'), ((2662, 2730), 'UTuning.plots.error_accuracy_plot', 'plots.error_accuracy_plot', (['perc', 'IF_array', 'Pred_array', 'y_test', 'Sigma'], {}), '(perc, IF_array, Pred_array, y_test, Sigma)\n', (2687, 2730), False, 'from UTuning import scorer, plots\n'), ((386, 411), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (401, 411), False, 'import os\n'), ((1421, 1445), 'numpy.sqrt', 'np.sqrt', (['estimates[:, 1]'], {}), '(estimates[:, 1])\n', (1428, 1445), True, 'import numpy as np\n'), ((1609, 1734), 'catboost.CatBoostRegressor', 'CatBoostRegressor', ([], {'iterations': 'iters', 'learning_rate': 'lr', 'loss_function': '"""RMSEWithUncertainty"""', 'verbose': '(False)', 'random_seed': '(1)'}), "(iterations=iters, learning_rate=lr, loss_function=\n 'RMSEWithUncertainty', verbose=False, random_seed=1)\n", (1626, 1734), False, 'from catboost import CatBoostRegressor\n'), ((2046, 2067), 'numpy.asarray', 'np.asarray', (['ens_preds'], {}), '(ens_preds)\n', (2056, 2067), True, 'import numpy as np\n'), ((2263, 2289), 'numpy.var', 'np.var', (['Pred_array'], {'axis': '(1)'}), '(Pred_array, axis=1)\n', (2269, 2289), True, 'import numpy as np\n'), ((2328, 2363), 'numpy.mean', 'np.mean', (['ens_preds[:, :, 1]'], {'axis': '(1)'}), '(ens_preds[:, :, 1], axis=1)\n', (2335, 2363), True, 'import numpy as np\n'), ((2767, 2784), 'UTuning.scorer.Accuracy', 'scorer.Accuracy', ([], {}), '()\n', (2782, 2784), False, 'from UTuning import scorer, plots\n'), ((2823, 2841), 'UTuning.scorer.Precision', 'scorer.Precision', ([], {}), 
'()\n', (2839, 2841), False, 'from UTuning import scorer, plots\n'), ((2879, 2896), 'UTuning.scorer.Goodness', 'scorer.Goodness', ([], {}), '()\n', (2894, 2896), False, 'from UTuning import scorer, plots\n')] |
import logging
from platform import system
from tqdm import tqdm
from multiprocessing import Lock
loggers = {}
# https://stackoverflow.com/questions/38543506/
class TqdmLoggingHandler(logging.Handler):
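    """Logging handler that routes records through tqdm.write() so log messages do not break active progress bars."""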
def __init__(self, level=logging.NOTSET):
super(TqdmLoggingHandler, self).__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.set_lock(Lock())
tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def setup_custom_logger(name):
"""
Create a logger with a certain name and level
"""
global loggers
if loggers.get(name):
return loggers.get(name)
formatter = logging.Formatter(
fmt='%(levelname)s: %(message)s'
)
handler = TqdmLoggingHandler()
handler.setFormatter(formatter)
if system() not in ['Windows', 'cli']:
logging.addLevelName(logging.ERROR, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
logging.addLevelName(logging.WARNING, "\033[1;33m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.INFO, "\033[1;34m%s\033[1;0m" % logging.getLevelName(logging.INFO))
logging.addLevelName(logging.DEBUG, "\033[1;35m%s\033[1;0m" % logging.getLevelName(logging.DEBUG))
logger = logging.getLogger(name)
logger.setLevel(logging.WARNING)
# if (logger.hasHandlers()):
# logger.handlers.clear()
if logger.handlers:
logger.handlers = []
logger.addHandler(handler)
    loggers[name] = logger
return logger
| [
"logging.getLogger",
"tqdm.tqdm.write",
"logging.Formatter",
"platform.system",
"logging.getLevelName",
"multiprocessing.Lock"
]
| [((787, 838), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""%(levelname)s: %(message)s"""'}), "(fmt='%(levelname)s: %(message)s')\n", (804, 838), False, 'import logging\n'), ((1413, 1436), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1430, 1436), False, 'import logging\n'), ((933, 941), 'platform.system', 'system', ([], {}), '()\n', (939, 941), False, 'from platform import system\n'), ((433, 448), 'tqdm.tqdm.write', 'tqdm.write', (['msg'], {}), '(msg)\n', (443, 448), False, 'from tqdm import tqdm\n'), ((413, 419), 'multiprocessing.Lock', 'Lock', ([], {}), '()\n', (417, 419), False, 'from multiprocessing import Lock\n'), ((1039, 1074), 'logging.getLevelName', 'logging.getLevelName', (['logging.ERROR'], {}), '(logging.ERROR)\n', (1059, 1074), False, 'import logging\n'), ((1148, 1185), 'logging.getLevelName', 'logging.getLevelName', (['logging.WARNING'], {}), '(logging.WARNING)\n', (1168, 1185), False, 'import logging\n'), ((1256, 1290), 'logging.getLevelName', 'logging.getLevelName', (['logging.INFO'], {}), '(logging.INFO)\n', (1276, 1290), False, 'import logging\n'), ((1362, 1397), 'logging.getLevelName', 'logging.getLevelName', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (1382, 1397), False, 'import logging\n')] |
import cv2
import sys
import playsound
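# Pre-trained Haar cascade for frontal face detection (path resolved relative to the working directory)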
face_cascade = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')
# capture video using cv2
video_capture = cv2.VideoCapture(0)
while True:
# capture frame by frame, i.e, one by one
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# for each face on the projected on the frame
faces = face_cascade.detectMultiScale(
gray,
scaleFactor = 1.1,
minNeighbors = 5,
# minSize(35, 35)
)
# loop through the video faces for detection
for (x, y, w, h) in faces:
point1 = x+w
point2 = y+h
frame_color = (50, 50, 200)
rectangleBox = cv2.rectangle(frame, (x, y), (point1, point2), frame_color, 2)
cv2.imshow('video', frame)
if faces.any():
playsound.playsound('openDoorAlert.mp3', True)
if len(faces) > 1:
print("There are " + str(len(faces)) + " peoples at the gate")
else:
print("There is " + str(len(faces)) + " person at the gate")
else:
pass
if cv2.waitKey(1) & 0xFF == ord('q'):
sys.exit()
| [
"cv2.rectangle",
"playsound.playsound",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.cvtColor",
"sys.exit",
"cv2.CascadeClassifier",
"cv2.waitKey"
]
| [((55, 124), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""cascades/haarcascade_frontalface_default.xml"""'], {}), "('cascades/haarcascade_frontalface_default.xml')\n", (76, 124), False, 'import cv2\n'), ((168, 187), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (184, 187), False, 'import cv2\n'), ((302, 341), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (314, 341), False, 'import cv2\n'), ((735, 797), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(point1, point2)', 'frame_color', '(2)'], {}), '(frame, (x, y), (point1, point2), frame_color, 2)\n', (748, 797), False, 'import cv2\n'), ((806, 832), 'cv2.imshow', 'cv2.imshow', (['"""video"""', 'frame'], {}), "('video', frame)\n", (816, 832), False, 'import cv2\n'), ((869, 915), 'playsound.playsound', 'playsound.playsound', (['"""openDoorAlert.mp3"""', '(True)'], {}), "('openDoorAlert.mp3', True)\n", (888, 915), False, 'import playsound\n'), ((1223, 1233), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1231, 1233), False, 'import sys\n'), ((1176, 1190), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1187, 1190), False, 'import cv2\n')] |
# vim:set et sw=4 ts=4:
import logging
import sys
import jmespath
from . import sis, classes
# logging
logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
logger = logging.getLogger(__name__)
# SIS endpoint
enrollments_uri = "https://apis.berkeley.edu/sis/v2/enrollments"
# apparently some courses have LAB without LEC (?)
section_codes = ['LEC', 'SES', 'WBL']
async def get_student_enrollments(app_id, app_key, identifier, term_id,
id_type='campus-uid', enrolled_only='true', primary_only='true',
course_attr='course-id'):
    '''Gets a student's enrollments.'''
uri = enrollments_uri + f"/students/{identifier}"
headers = {
"Accept": "application/json",
"app_id": app_id,
"app_key": app_key
}
params = {
"page-number": 1,
"page-size": 100, # maximum
"id-type": id_type,
"term-id": term_id,
"enrolled-only": enrolled_only,
"primary-only": primary_only,
}
enrollments = await sis.get_items(uri, params, headers, 'studentEnrollments')
logger.debug(f"enrollments: {enrollments}")
if course_attr == 'course-id':
flt = '[].classSection.class.course.identifiers[?type == `cs-course-id`].id[]'
elif course_attr == 'display-name':
flt = '[].classSection.class.course.displayName'
return jmespath.search(flt, enrollments)
async def get_section_enrollments(app_id, app_key, term_id, section_id):
'''Gets a course section's enrollments.'''
uri = enrollments_uri + f"/terms/{term_id}/classes/sections/{section_id}"
headers = {
"Accept": "application/json",
"app_id": app_id,
"app_key": app_key
}
params = {
"page-number": 1,
"page-size": 100, # maximum
}
enrollments = await sis.get_items(uri, params, headers, 'classSectionEnrollments')
logger.info(f"{section_id}: {len(enrollments)}")
return enrollments
def section_id(section):
'''Return a section's course ID, e.g. "15807".'''
return section['id']
def section_subject_area(section):
'''Return a section's subject area, e.g. "STAT".'''
return jmespath.search('class.course.subjectArea.code', section)
def section_catalog_number(section):
'''Return a section's formatted catalog number, e.g. "215B".'''
return jmespath.search('class.course.catalogNumber.formatted', section)
def section_display_name(section):
'''Return a section's displayName, e.g. "STAT 215B".'''
return jmespath.search('class.course.displayName', section)
def section_is_primary(section):
'''Return a section's primary status.'''
return jmespath.search('association.primary', section)
def enrollment_campus_uid(enrollment):
    '''Return an enrollment's campus UID.'''
expr = "student.identifiers[?disclose && type=='campus-uid'].id | [0]"
return jmespath.search(expr, enrollment)
def enrollment_campus_email(enrollment):
'''Return an enrollment's campus email if found, otherwise
return any other email.'''
expr = "student.emails[?type.code=='CAMP'].emailAddress | [0]"
email = jmespath.search(expr, enrollment)
if email: return email
expr = "student.emails[?type.code=='OTHR'].emailAddress | [0]"
return jmespath.search(expr, enrollment)
def get_enrollment_uids(enrollments):
    '''Given a list of SIS enrollments, return the students' campus UIDs.'''
return list(map(lambda x: enrollment_campus_uid(x), enrollments))
def get_enrollment_emails(enrollments):
    '''Given a list of SIS enrollments, return the students' campus emails.'''
return list(map(lambda x: enrollment_campus_email(x), enrollments))
def enrollment_status(enrollment):
'''Return an enrollment's status, e.g. 'E', 'W', or 'D'.'''
return jmespath.search('enrollmentStatus.status.code', enrollment)
def filter_enrollment_status(enrollments, status):
return list(filter(lambda x: enrollment_status(x) == status, enrollments))
def status_code(constituents):
return {'enrolled':'E', 'waitlisted':'W', 'dropped':'D'}[constituents]
async def get_students(term_id, class_number, constituents, credentials, exact, identifier='campus-uid'):
'''Given a term and class section number, return the student ids.'''
if exact:
# get all enrollments for this section
enrollments = await get_section_enrollments(
credentials['enrollments_id'], credentials['enrollments_key'],
term_id, class_number
)
else:
# get the data for the specified section
section = await classes.get_sections_by_id(
credentials['classes_id'], credentials['classes_key'],
term_id, class_number, include_secondary='true'
)
# extract the subject area and catalog number, e.g. STAT C8
subject_area = section_subject_area(section)
catalog_number = section_catalog_number(section)
logger.info(f"{subject_area} {catalog_number}")
# get enrollments in all matching sections
enrollments = await get_enrollments(
credentials['enrollments_id'], credentials['enrollments_key'],
term_id, subject_area, catalog_number
)
if constituents == 'students':
constituent_enrollments = enrollments
else:
# filter for those enrollments with a specific status code
constituent_enrollments = filter_enrollment_status(
enrollments, status_code(constituents))
# function to extract an enrollment attribute
if identifier == 'campus-uid':
enrollment_attr_fn = enrollment_campus_uid
else:
enrollment_attr_fn = enrollment_campus_email
logger.debug(f"constituent_enrollments: {constituent_enrollments}")
# we convert to a set to collapse overlapping enrollments between
# lectures and labs (if not exact)
return set(map(lambda x: enrollment_attr_fn(x), constituent_enrollments))
def filter_lectures(sections, relevant_codes=section_codes):
'''
Given a list of SIS sections:
[{'code': '32227', 'description': '2019 Spring ASTRON 128 001 LAB 001'}]
return only the section codes which are lectures.
'''
codes = []
for section in sections:
if 'description' not in section: continue
desc_words = set(section['description'].split())
if len(set(desc_words) & set(relevant_codes)) > 0:
codes.append(section['code'])
return codes
async def get_lecture_section_ids(app_id, app_key, term_id, subject_area, catalog_number=None):
'''
Given a term, subject, and course number, return the lecture section ids.
We only care about the lecture enrollments since they contain a superset
of the enrollments of all other section types (lab, dis).
'''
uri = enrollments_uri + f'/terms/{term_id}/classes/sections/descriptors'
headers = {
"Accept": "application/json",
"app_id": app_id,
"app_key": app_key
}
params = {
'page-number': 1,
"subject-area-code": subject_area
}
if catalog_number:
params["catalog-number"] = catalog_number
# Retrieve the sections associated with the course which includes
# both lecture and sections.
sections = await sis.get_items(uri, params, headers, 'fieldValues')
return filter_lectures(sections)
async def get_enrollments(app_id, app_key, term_id, subject_area, catalog_number):
'''Gets a course's enrollments from the SIS.'''
logger.info(f"get_enrollments: {subject_area} {catalog_number}")
# get the lectures
lecture_codes = await get_lecture_section_ids(app_id, app_key, term_id,
subject_area, catalog_number)
# get the enrollments in each lecture
enrollments = []
for section_id in lecture_codes:
enrollments += await get_section_enrollments(app_id, app_key, term_id, section_id)
logger.info(f'enrollments: {len(enrollments)}')
return enrollments
| [
"logging.basicConfig",
"jmespath.search",
"logging.getLogger"
]
| [((106, 167), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.WARNING'}), '(stream=sys.stdout, level=logging.WARNING)\n', (125, 167), False, 'import logging\n'), ((177, 204), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (194, 204), False, 'import logging\n'), ((1331, 1364), 'jmespath.search', 'jmespath.search', (['flt', 'enrollments'], {}), '(flt, enrollments)\n', (1346, 1364), False, 'import jmespath\n'), ((2131, 2188), 'jmespath.search', 'jmespath.search', (['"""class.course.subjectArea.code"""', 'section'], {}), "('class.course.subjectArea.code', section)\n", (2146, 2188), False, 'import jmespath\n'), ((2306, 2370), 'jmespath.search', 'jmespath.search', (['"""class.course.catalogNumber.formatted"""', 'section'], {}), "('class.course.catalogNumber.formatted', section)\n", (2321, 2370), False, 'import jmespath\n'), ((2478, 2530), 'jmespath.search', 'jmespath.search', (['"""class.course.displayName"""', 'section'], {}), "('class.course.displayName', section)\n", (2493, 2530), False, 'import jmespath\n'), ((2621, 2668), 'jmespath.search', 'jmespath.search', (['"""association.primary"""', 'section'], {}), "('association.primary', section)\n", (2636, 2668), False, 'import jmespath\n'), ((2839, 2872), 'jmespath.search', 'jmespath.search', (['expr', 'enrollment'], {}), '(expr, enrollment)\n', (2854, 2872), False, 'import jmespath\n'), ((3091, 3124), 'jmespath.search', 'jmespath.search', (['expr', 'enrollment'], {}), '(expr, enrollment)\n', (3106, 3124), False, 'import jmespath\n'), ((3230, 3263), 'jmespath.search', 'jmespath.search', (['expr', 'enrollment'], {}), '(expr, enrollment)\n', (3245, 3263), False, 'import jmespath\n'), ((3735, 3794), 'jmespath.search', 'jmespath.search', (['"""enrollmentStatus.status.code"""', 'enrollment'], {}), "('enrollmentStatus.status.code', enrollment)\n", (3750, 3794), False, 'import jmespath\n')] |
from flatland.envs.agent_utils import RailAgentStatus
from flatland.envs.malfunction_generators import malfunction_from_params, MalfunctionParameters
from flatland.envs.observations import GlobalObsForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
from flatland.utils.rendertools import RenderTool
import random
import sys
import os
import time
import msgpack
import json
from PIL import Image
import argparse as ap
def RandomTestParams(tid):
seed = tid * 19997 + 997
random.seed(seed)
width = 50 + random.randint(0, 100)
height = 50 + random.randint(0, 100)
nr_cities = 4 + random.randint(0, (width + height) // 10)
nr_trains = min(nr_cities * 20, 100 + random.randint(0, 100))
max_rails_between_cities = 2
max_rails_in_cities = 3 + random.randint(0, 5)
malfunction_rate = 30 + random.randint(0, 100)
malfunction_min_duration = 3 + random.randint(0, 7)
malfunction_max_duration = 20 + random.randint(0, 80)
return (
seed, width, height,
nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration
)
def RandomTestParams_small(tid):
seed = tid * 19997 + 997
random.seed(seed)
nSize = random.randint(0,5)
width = 20 + nSize * 5
height = 20 + nSize * 5
nr_cities = 2 + nSize // 2 + random.randint(0,2)
nr_trains = min(nr_cities * 5, 5 + random.randint(0,5)) #, 10 + random.randint(0, 10))
max_rails_between_cities = 2
max_rails_in_cities = 3 + random.randint(0, nSize)
malfunction_rate = 30 + random.randint(0, 100)
malfunction_min_duration = 3 + random.randint(0, 7)
malfunction_max_duration = 20 + random.randint(0, 80)
return (
seed, width, height,
nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration
)
def ShouldRunTest(tid):
return tid >= 7
#return tid >= 3
return True
def create_test_env(fnParams, nTest, sDir):
(seed, width, height,
nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration) = fnParams(nTest)
#if not ShouldRunTest(test_id):
# continue
rail_generator = sparse_rail_generator(
max_num_cities=nr_cities,
seed=seed,
grid_mode=False,
max_rails_between_cities=max_rails_between_cities,
max_rails_in_city=max_rails_in_cities,
)
#stochastic_data = {'malfunction_rate': malfunction_rate,
# 'min_duration': malfunction_min_duration,
# 'max_duration': malfunction_max_duration
# }
stochastic_data = MalfunctionParameters(malfunction_rate=malfunction_rate,
min_duration=malfunction_min_duration,
max_duration=malfunction_max_duration
)
observation_builder = GlobalObsForRailEnv()
DEFAULT_SPEED_RATIO_MAP = {
1.: 0.25,
1. / 2.: 0.25,
1. / 3.: 0.25,
1. / 4.: 0.25}
schedule_generator = sparse_schedule_generator(DEFAULT_SPEED_RATIO_MAP)
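    # Rail generation can fail for tight parameter combinations, so retry a few times, enlarging the grid on each failure.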
for iAttempt in range(5):
try:
env = RailEnv(
width=width,
height=height,
rail_generator=rail_generator,
schedule_generator=schedule_generator,
number_of_agents=nr_trains,
malfunction_generator_and_process_data=malfunction_from_params(stochastic_data),
obs_builder_object=observation_builder,
remove_agents_at_target=True
)
obs = env.reset(random_seed = seed)
break
except ValueError as oErr:
print("Error:", oErr)
width += 5
height += 5
print("Try again with larger env: (w,h):", width, height)
if not os.path.exists(sDir):
os.makedirs(sDir)
sfName = "{}/Level_{}.mpk".format(sDir, nTest)
if os.path.exists(sfName):
os.remove(sfName)
env.save(sfName)
sys.stdout.write(".")
sys.stdout.flush()
return env
#env = create_test_env(RandomTestParams_small, 0, "train-envs-small/Test_0")
def createEnvSet(nStart, nEnd, sDir, bSmall=True):
#print("Generate small envs in train-envs-small:")
print(f"Generate envs (small={bSmall}) in dir {sDir}:")
sDirImages = "train-envs-small/images/"
if not os.path.exists(sDirImages):
os.makedirs(sDirImages)
for test_id in range(nStart, nEnd, 1):
env = create_test_env(RandomTestParams_small, test_id, sDir)
oRender = RenderTool(env, gl="PILSVG")
#oRender.env = env
#oRender.set_new_rail()
oRender.render_env()
g2img = oRender.get_image()
imgPIL = Image.fromarray(g2img)
#imgPIL.show()
imgPIL.save(sDirImages + "Level_{}.png".format(test_id))
# print("Generate large envs in train-envs-1000:")
# for test_id in range(100):
# create_test_env(RandomTestParams, test_id, "train-envs-1000/Test_0")
def merge(sfEpisode, sfEnv, sfEnvOut, bJson=False):
if bJson:
with open(sfEpisode, "rb") as fEp:
oActions = json.load(fEp)
oEp = {"actions":oActions}
print("json oEp:", type(oEp), list(oEp.keys()))
else:
with open(sfEpisode, "rb") as fEp:
oEp = msgpack.load(fEp)
print("oEp:", type(oEp), list(oEp.keys()))
with open(sfEnv, "rb") as fEnv:
oEnv = msgpack.load(fEnv)
print("oEnv:", type(oEnv), list(oEnv.keys()))
# merge dicts
oEnv2 = {**oEp, **oEnv}
print("Merged keys:", list(oEnv2.keys()))
with open(sfEnvOut, "wb") as fEnv:
msgpack.dump(oEnv2, fEnv)
def printKeys1(sfEnv):
with open(sfEnv, "rb") as fEnv:
oEnv = msgpack.load(fEnv, encoding="utf-8")
print(sfEnv, "keys:", list(oEnv.keys()))
for sKey in oEnv.keys():
print("key", sKey, len(oEnv[sKey]))
if sKey == "shape":
print("shape: ", oEnv[sKey] )
def printKeys(sfEnvs):
try:
for sfEnv in sfEnvs:
printKeys1(sfEnv)
except:
# assume single env
printKeys1(sfEnvs)
def main2():
parser = ap.ArgumentParser(description='Generate envs, merge episodes into env files.')
parser.add_argument("-c", '--createEnvs', type=int, nargs=2, action="append",
metavar=("nStart", "nEnd"),
help='merge episode into env')
parser.add_argument("-d", "--outDir", type=str, nargs=1, default="./test-envs-tmp")
parser.add_argument("-m", '--merge', type=str, nargs=3, action="append",
metavar=("episode", "env", "output_env"),
help='merge episode into env')
parser.add_argument("-j", '--mergejson', type=str, nargs=3, action="append",
metavar=("json", "env", "output_env"),
help='merge json actions into env, with key actions')
parser.add_argument('-k', "--keys", type=str, action='append', nargs="+",
help='print the keys in a file')
args=parser.parse_args()
print(args)
if args.merge:
print("merge:", args.merge)
merge(*args.merge[0])
if args.mergejson:
print("merge json:", args.mergejson)
merge(*args.mergejson[0], bJson=True)
if args.keys:
print("keys:", args.keys)
printKeys(args.keys[0])
if args.outDir:
print("outDir", args.outDir)
if args.createEnvs:
print("create Envs - ", *args.createEnvs[0])
createEnvSet(*args.createEnvs[0], sDir=args.outDir)
if __name__=="__main__":
main2()
| [
"os.path.exists",
"PIL.Image.fromarray",
"msgpack.load",
"flatland.envs.schedule_generators.sparse_schedule_generator",
"argparse.ArgumentParser",
"flatland.envs.observations.GlobalObsForRailEnv",
"os.makedirs",
"msgpack.dump",
"flatland.envs.malfunction_generators.malfunction_from_params",
"random.seed",
"sys.stdout.write",
"os.remove",
"json.load",
"flatland.utils.rendertools.RenderTool",
"sys.stdout.flush",
"flatland.envs.rail_generators.sparse_rail_generator",
"random.randint",
"flatland.envs.malfunction_generators.MalfunctionParameters"
]
| [((620, 637), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (631, 637), False, 'import random\n'), ((1379, 1396), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1390, 1396), False, 'import random\n'), ((1410, 1430), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (1424, 1430), False, 'import random\n'), ((2529, 2703), 'flatland.envs.rail_generators.sparse_rail_generator', 'sparse_rail_generator', ([], {'max_num_cities': 'nr_cities', 'seed': 'seed', 'grid_mode': '(False)', 'max_rails_between_cities': 'max_rails_between_cities', 'max_rails_in_city': 'max_rails_in_cities'}), '(max_num_cities=nr_cities, seed=seed, grid_mode=False,\n max_rails_between_cities=max_rails_between_cities, max_rails_in_city=\n max_rails_in_cities)\n', (2550, 2703), False, 'from flatland.envs.rail_generators import sparse_rail_generator\n'), ((2991, 3130), 'flatland.envs.malfunction_generators.MalfunctionParameters', 'MalfunctionParameters', ([], {'malfunction_rate': 'malfunction_rate', 'min_duration': 'malfunction_min_duration', 'max_duration': 'malfunction_max_duration'}), '(malfunction_rate=malfunction_rate, min_duration=\n malfunction_min_duration, max_duration=malfunction_max_duration)\n', (3012, 3130), False, 'from flatland.envs.malfunction_generators import malfunction_from_params, MalfunctionParameters\n'), ((3226, 3247), 'flatland.envs.observations.GlobalObsForRailEnv', 'GlobalObsForRailEnv', ([], {}), '()\n', (3245, 3247), False, 'from flatland.envs.observations import GlobalObsForRailEnv\n'), ((3395, 3445), 'flatland.envs.schedule_generators.sparse_schedule_generator', 'sparse_schedule_generator', (['DEFAULT_SPEED_RATIO_MAP'], {}), '(DEFAULT_SPEED_RATIO_MAP)\n', (3420, 3445), False, 'from flatland.envs.schedule_generators import sparse_schedule_generator\n'), ((4311, 4333), 'os.path.exists', 'os.path.exists', (['sfName'], {}), '(sfName)\n', (4325, 4333), False, 'import os\n'), ((4387, 4408), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (4403, 4408), False, 'import sys\n'), ((4413, 4431), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4429, 4431), False, 'import sys\n'), ((6620, 6698), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {'description': '"""Generate envs, merge episodes into env files."""'}), "(description='Generate envs, merge episodes into env files.')\n", (6637, 6698), True, 'import argparse as ap\n'), ((655, 677), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (669, 677), False, 'import random\n'), ((696, 718), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (710, 718), False, 'import random\n'), ((739, 780), 'random.randint', 'random.randint', (['(0)', '((width + height) // 10)'], {}), '(0, (width + height) // 10)\n', (753, 780), False, 'import random\n'), ((910, 930), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (924, 930), False, 'import random\n'), ((959, 981), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (973, 981), False, 'import random\n'), ((1017, 1037), 'random.randint', 'random.randint', (['(0)', '(7)'], {}), '(0, 7)\n', (1031, 1037), False, 'import random\n'), ((1074, 1095), 'random.randint', 'random.randint', (['(0)', '(80)'], {}), '(0, 80)\n', (1088, 1095), False, 'import random\n'), ((1519, 1539), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (1533, 1539), False, 'import random\n'), ((1693, 1717), 'random.randint', 'random.randint', (['(0)', 'nSize'], {}), '(0, 
nSize)\n', (1707, 1717), False, 'import random\n'), ((1746, 1768), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (1760, 1768), False, 'import random\n'), ((1804, 1824), 'random.randint', 'random.randint', (['(0)', '(7)'], {}), '(0, 7)\n', (1818, 1824), False, 'import random\n'), ((1861, 1882), 'random.randint', 'random.randint', (['(0)', '(80)'], {}), '(0, 80)\n', (1875, 1882), False, 'import random\n'), ((4204, 4224), 'os.path.exists', 'os.path.exists', (['sDir'], {}), '(sDir)\n', (4218, 4224), False, 'import os\n'), ((4234, 4251), 'os.makedirs', 'os.makedirs', (['sDir'], {}), '(sDir)\n', (4245, 4251), False, 'import os\n'), ((4343, 4360), 'os.remove', 'os.remove', (['sfName'], {}), '(sfName)\n', (4352, 4360), False, 'import os\n'), ((4751, 4777), 'os.path.exists', 'os.path.exists', (['sDirImages'], {}), '(sDirImages)\n', (4765, 4777), False, 'import os\n'), ((4787, 4810), 'os.makedirs', 'os.makedirs', (['sDirImages'], {}), '(sDirImages)\n', (4798, 4810), False, 'import os\n'), ((4943, 4971), 'flatland.utils.rendertools.RenderTool', 'RenderTool', (['env'], {'gl': '"""PILSVG"""'}), "(env, gl='PILSVG')\n", (4953, 4971), False, 'from flatland.utils.rendertools import RenderTool\n'), ((5114, 5136), 'PIL.Image.fromarray', 'Image.fromarray', (['g2img'], {}), '(g2img)\n', (5129, 5136), False, 'from PIL import Image\n'), ((5845, 5863), 'msgpack.load', 'msgpack.load', (['fEnv'], {}), '(fEnv)\n', (5857, 5863), False, 'import msgpack\n'), ((6071, 6096), 'msgpack.dump', 'msgpack.dump', (['oEnv2', 'fEnv'], {}), '(oEnv2, fEnv)\n', (6083, 6096), False, 'import msgpack\n'), ((6180, 6216), 'msgpack.load', 'msgpack.load', (['fEnv'], {'encoding': '"""utf-8"""'}), "(fEnv, encoding='utf-8')\n", (6192, 6216), False, 'import msgpack\n'), ((823, 845), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (837, 845), False, 'import random\n'), ((1578, 1598), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (1592, 1598), False, 'import random\n'), ((5531, 5545), 'json.load', 'json.load', (['fEp'], {}), '(fEp)\n', (5540, 5545), False, 'import json\n'), ((5716, 5733), 'msgpack.load', 'msgpack.load', (['fEp'], {}), '(fEp)\n', (5728, 5733), False, 'import msgpack\n'), ((3778, 3818), 'flatland.envs.malfunction_generators.malfunction_from_params', 'malfunction_from_params', (['stochastic_data'], {}), '(stochastic_data)\n', (3801, 3818), False, 'from flatland.envs.malfunction_generators import malfunction_from_params, MalfunctionParameters\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 2.2.4 on 2019-08-21 19:53
# this file is auto-generated so don't do flake8 on it
# flake8: noqa
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
import django.utils.timezone
def copy_date_done_to_date_created(apps, schema_editor):
TaskResult = apps.get_model('django_celery_results', 'taskresult')
db_alias = schema_editor.connection.alias
TaskResult.objects.using(db_alias).all().update(
date_created=models.F('date_done')
)
def reverse_copy_date_done_to_date_created(app, schema_editor):
# the reverse of 'copy_date_done_to_date_created' is do nothing
# because the 'date_created' will be removed.
pass
class Migration(migrations.Migration):
dependencies = [
('django_celery_results', '0005_taskresult_worker'),
]
operations = [
migrations.AddField(
model_name='taskresult',
name='date_created',
field=models.DateTimeField(
auto_now_add=True,
db_index=True,
default=django.utils.timezone.now,
help_text='Datetime field when the task result was created in UTC',
verbose_name='Created DateTime'
),
preserve_default=False,
),
migrations.RunPython(copy_date_done_to_date_created,
reverse_copy_date_done_to_date_created),
]
| [
"django.db.models.DateTimeField",
"django.db.migrations.RunPython",
"django.db.models.F"
]
| [((1351, 1447), 'django.db.migrations.RunPython', 'migrations.RunPython', (['copy_date_done_to_date_created', 'reverse_copy_date_done_to_date_created'], {}), '(copy_date_done_to_date_created,\n reverse_copy_date_done_to_date_created)\n', (1371, 1447), False, 'from django.db import migrations, models\n'), ((522, 543), 'django.db.models.F', 'models.F', (['"""date_done"""'], {}), "('date_done')\n", (530, 543), False, 'from django.db import migrations, models\n'), ((1010, 1215), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'db_index': '(True)', 'default': 'django.utils.timezone.now', 'help_text': '"""Datetime field when the task result was created in UTC"""', 'verbose_name': '"""Created DateTime"""'}), "(auto_now_add=True, db_index=True, default=django.utils\n .timezone.now, help_text=\n 'Datetime field when the task result was created in UTC', verbose_name=\n 'Created DateTime')\n", (1030, 1215), False, 'from django.db import migrations, models\n')] |
import torch.nn as nn
from utils.BBBlayers import BBBConv2d, BBBLinearFactorial, FlattenLayer
class BBB3Conv3FC(nn.Module):
"""
Simple Neural Network having 3 Convolution
and 3 FC layers with Bayesian layers.
"""
def __init__(self, outputs, inputs):
super(BBB3Conv3FC, self).__init__()
self.conv1 = BBBConv2d(inputs, 32, 5, stride=1, padding=2)
self.soft1 = nn.Softplus()
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv2 = BBBConv2d(32, 64, 5, stride=1, padding=2)
self.soft2 = nn.Softplus()
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv3 = BBBConv2d(64, 128, 5, stride=1, padding=1)
self.soft3 = nn.Softplus()
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2)
self.flatten = FlattenLayer(2 * 2 * 128)
self.fc1 = BBBLinearFactorial(2 * 2 * 128, 1000)
self.soft5 = nn.Softplus()
self.fc2 = BBBLinearFactorial(1000, 1000)
self.soft6 = nn.Softplus()
self.fc3 = BBBLinearFactorial(1000, outputs)
layers = [self.conv1, self.soft1, self.pool1, self.conv2, self.soft2, self.pool2,
self.conv3, self.soft3, self.pool3, self.flatten, self.fc1, self.soft5,
self.fc2, self.soft6, self.fc3]
self.layers = nn.ModuleList(layers)
def probforward(self, x):
'Forward pass with Bayesian weights'
kl = 0
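        # Each Bayesian layer returns its activations together with a KL term; accumulate the KL across layers.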
for layer in self.layers:
if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):
x, _kl, = layer.convprobforward(x)
kl += _kl
elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward):
x, _kl, = layer.fcprobforward(x)
kl += _kl
else:
x = layer(x)
logits = x
return logits, kl | [
"torch.nn.Softplus",
"utils.BBBlayers.FlattenLayer",
"torch.nn.ModuleList",
"utils.BBBlayers.BBBLinearFactorial",
"utils.BBBlayers.BBBConv2d",
"torch.nn.MaxPool2d"
]
| [((337, 382), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['inputs', '(32)', '(5)'], {'stride': '(1)', 'padding': '(2)'}), '(inputs, 32, 5, stride=1, padding=2)\n', (346, 382), False, 'from utils.BBBlayers import BBBConv2d, BBBLinearFactorial, FlattenLayer\n'), ((404, 417), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (415, 417), True, 'import torch.nn as nn\n'), ((439, 476), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (451, 476), True, 'import torch.nn as nn\n'), ((499, 540), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(32)', '(64)', '(5)'], {'stride': '(1)', 'padding': '(2)'}), '(32, 64, 5, stride=1, padding=2)\n', (508, 540), False, 'from utils.BBBlayers import BBBConv2d, BBBLinearFactorial, FlattenLayer\n'), ((562, 575), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (573, 575), True, 'import torch.nn as nn\n'), ((597, 634), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (609, 634), True, 'import torch.nn as nn\n'), ((657, 699), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(64)', '(128)', '(5)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 128, 5, stride=1, padding=1)\n', (666, 699), False, 'from utils.BBBlayers import BBBConv2d, BBBLinearFactorial, FlattenLayer\n'), ((721, 734), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (732, 734), True, 'import torch.nn as nn\n'), ((756, 793), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (768, 793), True, 'import torch.nn as nn\n'), ((818, 843), 'utils.BBBlayers.FlattenLayer', 'FlattenLayer', (['(2 * 2 * 128)'], {}), '(2 * 2 * 128)\n', (830, 843), False, 'from utils.BBBlayers import BBBConv2d, BBBLinearFactorial, FlattenLayer\n'), ((863, 900), 'utils.BBBlayers.BBBLinearFactorial', 'BBBLinearFactorial', (['(2 * 2 * 128)', '(1000)'], {}), '(2 * 2 * 128, 1000)\n', (881, 900), False, 'from utils.BBBlayers import BBBConv2d, BBBLinearFactorial, FlattenLayer\n'), ((922, 935), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (933, 935), True, 'import torch.nn as nn\n'), ((956, 986), 'utils.BBBlayers.BBBLinearFactorial', 'BBBLinearFactorial', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (974, 986), False, 'from utils.BBBlayers import BBBConv2d, BBBLinearFactorial, FlattenLayer\n'), ((1008, 1021), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (1019, 1021), True, 'import torch.nn as nn\n'), ((1042, 1075), 'utils.BBBlayers.BBBLinearFactorial', 'BBBLinearFactorial', (['(1000)', 'outputs'], {}), '(1000, outputs)\n', (1060, 1075), False, 'from utils.BBBlayers import BBBConv2d, BBBLinearFactorial, FlattenLayer\n'), ((1330, 1351), 'torch.nn.ModuleList', 'nn.ModuleList', (['layers'], {}), '(layers)\n', (1343, 1351), True, 'import torch.nn as nn\n')] |
import os
from tensorflow.contrib.learn.python.learn.datasets import base
import numpy as np
import IPython
from subprocess import call
from keras.preprocessing import image
from influence.dataset import DataSet
from influence.inception_v3 import preprocess_input
BASE_DIR = 'data' # TODO: change
def fill(X, Y, idx, label, img_path, img_side):
img = image.load_img(img_path, target_size=(img_side, img_side))
x = image.img_to_array(img)
X[idx, ...] = x
Y[idx] = label
def extract_and_rename_animals():
class_maps = [
('dog', 'n02084071'),
('cat', 'n02121808'),
('bird', 'n01503061'),
('fish', 'n02512053'),
('horse', 'n02374451'),
('monkey', 'n02484322'),
('zebra', 'n02391049'),
('panda', 'n02510455'),
('lemur', 'n02496913'),
('wombat', 'n01883070'),
]
for class_string, class_id in class_maps:
class_dir = os.path.join(BASE_DIR, class_string)
print(class_dir)
call('mkdir %s' % class_dir, shell=True)
call('tar -xf %s.tar -C %s' % (os.path.join(BASE_DIR, class_id), class_dir), shell=True)
for filename in os.listdir(class_dir):
file_idx = filename.split('_')[1].split('.')[0]
src_filename = os.path.join(class_dir, filename)
dst_filename = os.path.join(class_dir, '%s_%s.JPEG' % (class_string, file_idx))
os.rename(src_filename, dst_filename)
def load_animals(num_train_ex_per_class=300,
num_test_ex_per_class=100,
num_valid_ex_per_class=0,
classes=None,
):
num_channels = 3
img_side = 299
if num_valid_ex_per_class == 0:
valid_str = ''
else:
        valid_str = '_valid-%s' % num_valid_ex_per_class
if classes is None:
classes = ['dog', 'cat', 'bird', 'fish', 'horse', 'monkey', 'zebra', 'panda', 'lemur', 'wombat']
data_filename = os.path.join(BASE_DIR, 'dataset_train-%s_test-%s%s.npz' % (num_train_ex_per_class, num_test_ex_per_class, valid_str))
else:
data_filename = os.path.join(BASE_DIR, 'dataset_%s_train-%s_test-%s%s.npz' % ('-'.join(classes), num_train_ex_per_class, num_test_ex_per_class, valid_str))
num_classes = len(classes)
num_train_examples = num_train_ex_per_class * num_classes
num_test_examples = num_test_ex_per_class * num_classes
num_valid_examples = num_valid_ex_per_class * num_classes
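    # Use the cached .npz when it exists; otherwise build the arrays from the raw JPEGs, preprocess, shuffle, and cache them.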
if os.path.exists(data_filename):
print('Loading animals from disk...')
f = np.load(data_filename)
X_train = f['X_train']
X_test = f['X_test']
Y_train = f['Y_train']
Y_test = f['Y_test']
if 'X_valid' in f:
X_valid = f['X_valid']
else:
X_valid = None
if 'Y_valid' in f:
Y_valid = f['Y_valid']
else:
Y_valid = None
else:
print('Reading animals from raw images...')
X_train = np.zeros([num_train_examples, img_side, img_side, num_channels])
X_test = np.zeros([num_test_examples, img_side, img_side, num_channels])
# X_valid = np.zeros([num_valid_examples, img_side, img_side, num_channels])
X_valid = None
Y_train = np.zeros([num_train_examples])
Y_test = np.zeros([num_test_examples])
# Y_valid = np.zeros([num_valid_examples])
Y_valid = None
for class_idx, class_string in enumerate(classes):
print('class: %s' % class_string)
# For some reason, a lot of numbers are skipped.
i = 0
num_filled = 0
while num_filled < num_train_ex_per_class:
img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))
print(img_path)
if os.path.exists(img_path):
fill(X_train, Y_train, num_filled + (num_train_ex_per_class * class_idx), class_idx, img_path, img_side)
num_filled += 1
print(num_filled)
i += 1
num_filled = 0
while num_filled < num_test_ex_per_class:
img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))
if os.path.exists(img_path):
fill(X_test, Y_test, num_filled + (num_test_ex_per_class * class_idx), class_idx, img_path, img_side)
num_filled += 1
print(num_filled)
i += 1
num_filled = 0
while num_filled < num_valid_ex_per_class:
img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))
if os.path.exists(img_path):
fill(X_valid, Y_valid, num_filled + (num_valid_ex_per_class * class_idx), class_idx, img_path, img_side)
num_filled += 1
print(num_filled)
i += 1
X_train = preprocess_input(X_train)
X_test = preprocess_input(X_test)
X_valid = preprocess_input(X_valid)
np.random.seed(0)
permutation_idx = np.arange(num_train_examples)
np.random.shuffle(permutation_idx)
X_train = X_train[permutation_idx, :]
Y_train = Y_train[permutation_idx]
permutation_idx = np.arange(num_test_examples)
np.random.shuffle(permutation_idx)
X_test = X_test[permutation_idx, :]
Y_test = Y_test[permutation_idx]
permutation_idx = np.arange(num_valid_examples)
np.random.shuffle(permutation_idx)
X_valid = X_valid[permutation_idx, :]
Y_valid = Y_valid[permutation_idx]
np.savez_compressed(data_filename, X_train=X_train, Y_train=Y_train, X_test=X_test, Y_test=Y_test, X_valid=X_valid, Y_valid=Y_valid)
train = DataSet(X_train, Y_train)
if (X_valid is not None) and (Y_valid is not None):
# validation = DataSet(X_valid, Y_valid)
validation = None
else:
validation = None
test = DataSet(X_test, Y_test)
return base.Datasets(train=train, validation=validation, test=test)
def load_koda():
num_channels = 3
img_side = 299
data_filename = os.path.join(BASE_DIR, 'dataset_koda.npz')
if os.path.exists(data_filename):
print('Loading Koda from disk...')
f = np.load(data_filename)
X = f['X']
Y = f['Y']
else:
# Returns all class 0
print('Reading Koda from raw images...')
image_files = [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg'))]
# Hack to get the image files in the right order
# image_files = [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg') and not image_file.startswith('124'))]
# image_files += [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg') and image_file.startswith('124'))]
num_examples = len(image_files)
X = np.zeros([num_examples, img_side, img_side, num_channels])
Y = np.zeros([num_examples])
class_idx = 0
for counter, image_file in enumerate(image_files):
img_path = os.path.join(BASE_DIR, 'koda', image_file)
fill(X, Y, counter, class_idx, img_path, img_side)
X = preprocess_input(X)
np.savez(data_filename, X=X, Y=Y)
return X, Y
def load_dogfish_with_koda():
classes = ['dog', 'fish']
X_test, Y_test = load_koda()
data_sets = load_animals(num_train_ex_per_class=900,
num_test_ex_per_class=300,
num_valid_ex_per_class=0,
classes=classes)
train = data_sets.train
validation = data_sets.validation
test = DataSet(X_test, Y_test)
return base.Datasets(train=train, validation=validation, test=test)
def load_dogfish_with_orig_and_koda():
classes = ['dog', 'fish']
X_test, Y_test = load_koda()
X_test = np.reshape(X_test, (X_test.shape[0], -1))
data_sets = load_animals(num_train_ex_per_class=900,
num_test_ex_per_class=300,
num_valid_ex_per_class=0,
classes=classes)
train = data_sets.train
validation = data_sets.validation
test = DataSet(
np.concatenate((data_sets.test.x, X_test), axis=0),
np.concatenate((data_sets.test.labels, Y_test), axis=0))
return base.Datasets(train=train, validation=validation, test=test)
| [
"keras.preprocessing.image.img_to_array",
"numpy.arange",
"influence.inception_v3.preprocess_input",
"os.path.exists",
"numpy.savez",
"os.listdir",
"numpy.reshape",
"subprocess.call",
"numpy.random.seed",
"numpy.concatenate",
"numpy.savez_compressed",
"os.rename",
"tensorflow.contrib.learn.python.learn.datasets.base.Datasets",
"keras.preprocessing.image.load_img",
"os.path.join",
"numpy.zeros",
"influence.dataset.DataSet",
"numpy.load",
"numpy.random.shuffle"
]
| [((361, 419), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(img_side, img_side)'}), '(img_path, target_size=(img_side, img_side))\n', (375, 419), False, 'from keras.preprocessing import image\n'), ((428, 451), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (446, 451), False, 'from keras.preprocessing import image\n'), ((2507, 2536), 'os.path.exists', 'os.path.exists', (['data_filename'], {}), '(data_filename)\n', (2521, 2536), False, 'import os\n'), ((5927, 5952), 'influence.dataset.DataSet', 'DataSet', (['X_train', 'Y_train'], {}), '(X_train, Y_train)\n', (5934, 5952), False, 'from influence.dataset import DataSet\n'), ((6132, 6155), 'influence.dataset.DataSet', 'DataSet', (['X_test', 'Y_test'], {}), '(X_test, Y_test)\n', (6139, 6155), False, 'from influence.dataset import DataSet\n'), ((6168, 6228), 'tensorflow.contrib.learn.python.learn.datasets.base.Datasets', 'base.Datasets', ([], {'train': 'train', 'validation': 'validation', 'test': 'test'}), '(train=train, validation=validation, test=test)\n', (6181, 6228), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((6309, 6351), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""dataset_koda.npz"""'], {}), "(BASE_DIR, 'dataset_koda.npz')\n", (6321, 6351), False, 'import os\n'), ((6360, 6389), 'os.path.exists', 'os.path.exists', (['data_filename'], {}), '(data_filename)\n', (6374, 6389), False, 'import os\n'), ((7934, 7957), 'influence.dataset.DataSet', 'DataSet', (['X_test', 'Y_test'], {}), '(X_test, Y_test)\n', (7941, 7957), False, 'from influence.dataset import DataSet\n'), ((7970, 8030), 'tensorflow.contrib.learn.python.learn.datasets.base.Datasets', 'base.Datasets', ([], {'train': 'train', 'validation': 'validation', 'test': 'test'}), '(train=train, validation=validation, test=test)\n', (7983, 8030), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((8148, 8189), 'numpy.reshape', 'np.reshape', (['X_test', '(X_test.shape[0], -1)'], {}), '(X_test, (X_test.shape[0], -1))\n', (8158, 8189), True, 'import numpy as np\n'), ((8595, 8655), 'tensorflow.contrib.learn.python.learn.datasets.base.Datasets', 'base.Datasets', ([], {'train': 'train', 'validation': 'validation', 'test': 'test'}), '(train=train, validation=validation, test=test)\n', (8608, 8655), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((954, 990), 'os.path.join', 'os.path.join', (['BASE_DIR', 'class_string'], {}), '(BASE_DIR, class_string)\n', (966, 990), False, 'import os\n'), ((1024, 1064), 'subprocess.call', 'call', (["('mkdir %s' % class_dir)"], {'shell': '(True)'}), "('mkdir %s' % class_dir, shell=True)\n", (1028, 1064), False, 'from subprocess import call\n'), ((1195, 1216), 'os.listdir', 'os.listdir', (['class_dir'], {}), '(class_dir)\n', (1205, 1216), False, 'import os\n'), ((1991, 2113), 'os.path.join', 'os.path.join', (['BASE_DIR', "('dataset_train-%s_test-%s%s.npz' % (num_train_ex_per_class,\n num_test_ex_per_class, valid_str))"], {}), "(BASE_DIR, 'dataset_train-%s_test-%s%s.npz' % (\n num_train_ex_per_class, num_test_ex_per_class, valid_str))\n", (2003, 2113), False, 'import os\n'), ((2596, 2618), 'numpy.load', 'np.load', (['data_filename'], {}), '(data_filename)\n', (2603, 2618), True, 'import numpy as np\n'), ((3028, 3092), 'numpy.zeros', 'np.zeros', (['[num_train_examples, img_side, img_side, num_channels]'], {}), '([num_train_examples, img_side, img_side, num_channels])\n', (3036, 3092), True, 
'import numpy as np\n'), ((3110, 3173), 'numpy.zeros', 'np.zeros', (['[num_test_examples, img_side, img_side, num_channels]'], {}), '([num_test_examples, img_side, img_side, num_channels])\n', (3118, 3173), True, 'import numpy as np\n'), ((3301, 3331), 'numpy.zeros', 'np.zeros', (['[num_train_examples]'], {}), '([num_train_examples])\n', (3309, 3331), True, 'import numpy as np\n'), ((3349, 3378), 'numpy.zeros', 'np.zeros', (['[num_test_examples]'], {}), '([num_test_examples])\n', (3357, 3378), True, 'import numpy as np\n'), ((5073, 5098), 'influence.inception_v3.preprocess_input', 'preprocess_input', (['X_train'], {}), '(X_train)\n', (5089, 5098), False, 'from influence.inception_v3 import preprocess_input\n'), ((5116, 5140), 'influence.inception_v3.preprocess_input', 'preprocess_input', (['X_test'], {}), '(X_test)\n', (5132, 5140), False, 'from influence.inception_v3 import preprocess_input\n'), ((5159, 5184), 'influence.inception_v3.preprocess_input', 'preprocess_input', (['X_valid'], {}), '(X_valid)\n', (5175, 5184), False, 'from influence.inception_v3 import preprocess_input\n'), ((5194, 5211), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5208, 5211), True, 'import numpy as np\n'), ((5238, 5267), 'numpy.arange', 'np.arange', (['num_train_examples'], {}), '(num_train_examples)\n', (5247, 5267), True, 'import numpy as np\n'), ((5276, 5310), 'numpy.random.shuffle', 'np.random.shuffle', (['permutation_idx'], {}), '(permutation_idx)\n', (5293, 5310), True, 'import numpy as np\n'), ((5426, 5454), 'numpy.arange', 'np.arange', (['num_test_examples'], {}), '(num_test_examples)\n', (5435, 5454), True, 'import numpy as np\n'), ((5463, 5497), 'numpy.random.shuffle', 'np.random.shuffle', (['permutation_idx'], {}), '(permutation_idx)\n', (5480, 5497), True, 'import numpy as np\n'), ((5609, 5638), 'numpy.arange', 'np.arange', (['num_valid_examples'], {}), '(num_valid_examples)\n', (5618, 5638), True, 'import numpy as np\n'), ((5647, 5681), 'numpy.random.shuffle', 'np.random.shuffle', (['permutation_idx'], {}), '(permutation_idx)\n', (5664, 5681), True, 'import numpy as np\n'), ((5780, 5917), 'numpy.savez_compressed', 'np.savez_compressed', (['data_filename'], {'X_train': 'X_train', 'Y_train': 'Y_train', 'X_test': 'X_test', 'Y_test': 'Y_test', 'X_valid': 'X_valid', 'Y_valid': 'Y_valid'}), '(data_filename, X_train=X_train, Y_train=Y_train, X_test\n =X_test, Y_test=Y_test, X_valid=X_valid, Y_valid=Y_valid)\n', (5799, 5917), True, 'import numpy as np\n'), ((6446, 6468), 'numpy.load', 'np.load', (['data_filename'], {}), '(data_filename)\n', (6453, 6468), True, 'import numpy as np\n'), ((7170, 7228), 'numpy.zeros', 'np.zeros', (['[num_examples, img_side, img_side, num_channels]'], {}), '([num_examples, img_side, img_side, num_channels])\n', (7178, 7228), True, 'import numpy as np\n'), ((7241, 7265), 'numpy.zeros', 'np.zeros', (['[num_examples]'], {}), '([num_examples])\n', (7249, 7265), True, 'import numpy as np\n'), ((7490, 7509), 'influence.inception_v3.preprocess_input', 'preprocess_input', (['X'], {}), '(X)\n', (7506, 7509), False, 'from influence.inception_v3 import preprocess_input\n'), ((7519, 7552), 'numpy.savez', 'np.savez', (['data_filename'], {'X': 'X', 'Y': 'Y'}), '(data_filename, X=X, Y=Y)\n', (7527, 7552), True, 'import numpy as np\n'), ((8465, 8515), 'numpy.concatenate', 'np.concatenate', (['(data_sets.test.x, X_test)'], {'axis': '(0)'}), '((data_sets.test.x, X_test), axis=0)\n', (8479, 8515), True, 'import numpy as np\n'), ((8526, 8581), 'numpy.concatenate', 
'np.concatenate', (['(data_sets.test.labels, Y_test)'], {'axis': '(0)'}), '((data_sets.test.labels, Y_test), axis=0)\n', (8540, 8581), True, 'import numpy as np\n'), ((1306, 1339), 'os.path.join', 'os.path.join', (['class_dir', 'filename'], {}), '(class_dir, filename)\n', (1318, 1339), False, 'import os\n'), ((1367, 1431), 'os.path.join', 'os.path.join', (['class_dir', "('%s_%s.JPEG' % (class_string, file_idx))"], {}), "(class_dir, '%s_%s.JPEG' % (class_string, file_idx))\n", (1379, 1431), False, 'import os\n'), ((1444, 1481), 'os.rename', 'os.rename', (['src_filename', 'dst_filename'], {}), '(src_filename, dst_filename)\n', (1453, 1481), False, 'import os\n'), ((7371, 7413), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""koda"""', 'image_file'], {}), "(BASE_DIR, 'koda', image_file)\n", (7383, 7413), False, 'import os\n'), ((3767, 3840), 'os.path.join', 'os.path.join', (['BASE_DIR', "('%s/%s_%s.JPEG' % (class_string, class_string, i))"], {}), "(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))\n", (3779, 3840), False, 'import os\n'), ((3892, 3916), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (3906, 3916), False, 'import os\n'), ((4257, 4330), 'os.path.join', 'os.path.join', (['BASE_DIR', "('%s/%s_%s.JPEG' % (class_string, class_string, i))"], {}), "(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))\n", (4269, 4330), False, 'import os\n'), ((4350, 4374), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (4364, 4374), False, 'import os\n'), ((4713, 4786), 'os.path.join', 'os.path.join', (['BASE_DIR', "('%s/%s_%s.JPEG' % (class_string, class_string, i))"], {}), "(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))\n", (4725, 4786), False, 'import os\n'), ((4806, 4830), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (4820, 4830), False, 'import os\n'), ((1104, 1136), 'os.path.join', 'os.path.join', (['BASE_DIR', 'class_id'], {}), '(BASE_DIR, class_id)\n', (1116, 1136), False, 'import os\n'), ((6660, 6690), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""koda"""'], {}), "(BASE_DIR, 'koda')\n", (6672, 6690), False, 'import os\n')] |
import OpenPNM
import numpy as np
import OpenPNM.Physics.models as pm
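# Unit tests for OpenPNM's GenericLinearTransport: boundary-condition modes, source terms and super-pore conductances.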
class GenericLinearTransportTest:
def setup_class(self):
self.net = OpenPNM.Network.Cubic(shape=[5, 5, 5])
self.phase = OpenPNM.Phases.GenericPhase(network=self.net)
Ps = self.net.Ps
Ts = self.net.Ts
self.phys = OpenPNM.Physics.GenericPhysics(network=self.net,
phase=self.phase,
pores=Ps, throats=Ts)
self.phys['throat.cond'] = 5e-8
self.alg = OpenPNM.Algorithms.GenericLinearTransport(network=self.net,
phase=self.phase)
def test_set_BC_modes_pores(self):
BC1_pores = np.arange(25, 35)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC1_pores)
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == BC1_pores)
BC2_pores = np.arange(43, 50)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC2_pores,
mode='merge')
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == np.concatenate((BC1_pores, BC2_pores)))
BC3_pores = np.arange(4, 9)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC3_pores,
mode='overwrite')
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == BC3_pores)
BC4_pores = [11, 90]
self.alg.set_boundary_conditions(bctype='Neumann',
bcvalue=0.5,
pores=BC4_pores,
mode='overwrite')
ptest = self.alg.pores('pore.Neumann')
assert np.all(ptest == BC4_pores)
self.alg.set_boundary_conditions(bctype='Dirichlet',
pores=BC1_pores,
bcvalue=0.3)
ptest = self.alg.pores('pore.Dirichlet')
self.alg.set_boundary_conditions(bctype='Dirichlet',
pores=self.alg.Ps,
mode='remove')
Dp = np.sum(self.alg['pore.Dirichlet'])
assert Dp == 0
self.alg.set_boundary_conditions(bctype='Neumann',
mode='remove')
label = 'pore.Neumann'
assert (label not in self.alg.labels())
def test_set_BC_modes_throats(self):
BC1_throats = np.arange(25, 35)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC1_throats)
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == BC1_throats)
BC2_throats = np.arange(43, 50)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC2_throats,
mode='merge')
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == np.concatenate((BC1_throats, BC2_throats)))
BC3_throats = np.arange(4, 9)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC3_throats,
mode='overwrite')
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == BC3_throats)
BC4_throats = [11, 90]
self.alg.set_boundary_conditions(bctype='Neumann',
bcvalue=0.5,
throats=BC4_throats,
mode='overwrite')
t_test = self.alg.throats('throat.Neumann')
assert np.all(t_test == BC4_throats)
self.alg.set_boundary_conditions(bctype='Dirichlet',
throats=BC1_throats,
bcvalue=0.3)
t_test = self.alg.throats('throat.Dirichlet')
self.alg.set_boundary_conditions(bctype='Dirichlet',
throats=self.alg.Ts,
mode='remove')
Dp = np.sum(self.alg['throat.Dirichlet'])
assert Dp == 0
self.alg.set_boundary_conditions(bctype='Neumann',
mode='remove')
label = 'throat.Neumann'
assert (label not in self.alg.labels())
def test_set_BC_modes_with_boolean_masks_pores(self):
BC1_pores = np.zeros(self.alg.Np, dtype='bool')
BC1_pores[np.arange(25, 35)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC1_pores)
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == self.alg._parse_locations(BC1_pores))
BC2_pores = np.zeros(self.alg.Np, dtype='bool')
BC2_pores[np.arange(43, 50)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC2_pores,
mode='merge')
ptest = self.alg.pores('pore.Dirichlet')
B1 = self.alg._parse_locations(BC1_pores)
B2 = self.alg._parse_locations(BC2_pores)
assert np.all(ptest == np.concatenate((B1, B2)))
BC3_pores = np.zeros(self.alg.Np, dtype='bool')
BC3_pores[np.arange(4, 9)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC3_pores,
mode='overwrite')
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == self.alg._parse_locations(BC3_pores))
BC4_pores = np.zeros(self.alg.Np, dtype='bool')
BC4_pores[[11, 90]] = True
self.alg.set_boundary_conditions(bctype='Neumann',
bcvalue=0.5,
pores=BC4_pores,
mode='overwrite')
ptest = self.alg.pores('pore.Neumann')
assert np.all(ptest == self.alg._parse_locations(BC4_pores))
self.alg.set_boundary_conditions(bctype='Dirichlet',
pores=BC1_pores,
bcvalue=0.3)
ptest = self.alg.pores('pore.Dirichlet')
removed_p = self.alg._parse_locations(self.alg.Ps)
self.alg.set_boundary_conditions(bctype='Dirichlet',
pores=removed_p,
mode='remove')
Dp = np.sum(self.alg['pore.Dirichlet'])
assert Dp == 0
self.alg.set_boundary_conditions(bctype='Neumann',
mode='remove')
label = 'pore.Neumann'
assert (label not in self.alg.labels())
def test_set_BC_modes_with_boolean_masks_throats(self):
BC1_throats = np.zeros(self.alg.Nt, dtype='bool')
BC1_throats[np.arange(25, 35)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC1_throats)
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == self.alg._parse_locations(BC1_throats))
BC2_throats = np.zeros(self.alg.Nt, dtype='bool')
BC2_throats[np.arange(43, 50)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC2_throats,
mode='merge')
t_test = self.alg.throats('throat.Dirichlet')
B1 = self.alg._parse_locations(BC1_throats)
B2 = self.alg._parse_locations(BC2_throats)
assert np.all(t_test == np.concatenate((B1, B2)))
BC3_throats = np.zeros(self.alg.Nt, dtype='bool')
BC3_throats[np.arange(4, 9)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC3_throats,
mode='overwrite')
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == self.alg._parse_locations(BC3_throats))
BC4_throats = np.zeros(self.alg.Nt, dtype='bool')
BC4_throats[[11, 90]] = True
self.alg.set_boundary_conditions(bctype='Neumann',
bcvalue=0.5,
throats=BC4_throats,
mode='overwrite')
t_test = self.alg.throats('throat.Neumann')
assert np.all(t_test == self.alg._parse_locations(BC4_throats))
self.alg.set_boundary_conditions(bctype='Dirichlet',
throats=BC1_throats,
bcvalue=0.3)
t_test = self.alg.throats('throat.Dirichlet')
removed_t = self.alg._parse_locations(self.alg.Ts)
self.alg.set_boundary_conditions(bctype='Dirichlet',
throats=removed_t,
mode='remove')
Dp = np.sum(self.alg['throat.Dirichlet'])
assert Dp == 0
self.alg.set_boundary_conditions(bctype='Neumann',
mode='remove')
label = 'pore.Neumann'
assert (label not in self.alg.labels())
def test_super_pore_conductance(self):
g_super = []
BC1_pores = np.arange(20, 30)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.4,
pores=BC1_pores)
BC2_pores = np.arange(45, 66)
self.alg.set_boundary_conditions(bctype='Neumann_group',
bcvalue=1.4e-10,
pores=BC2_pores)
g_super.append(2e-12)
BC3_pores = np.arange(87, 94)
self.alg.set_boundary_conditions(bctype='Neumann_group',
bcvalue=-0.9e-10,
pores=BC3_pores)
g_super.append(np.ones(len(BC3_pores)) * 1.5e-12)
BC4_pores = np.arange(3, 7)
self.alg.set_boundary_conditions(bctype='Neumann_group',
bcvalue=0.1e-10,
pores=BC4_pores)
g_super.append(np.array([6.42e-13]))
self.alg.run(conductance='throat.cond',
quantity='pore.mole_fraction',
super_pore_conductance=g_super)
self.alg.return_results()
r1 = self.alg.rate(BC1_pores)[0]
r2 = self.alg.rate(BC2_pores)[0]
r3 = self.alg.rate(BC3_pores)[0]
r4 = self.alg.rate(BC4_pores)[0]
assert np.absolute(r1 + r2 + r3 + r4) < 1e-20
assert np.size(self.alg.super_pore_conductance[0]) == 1
assert np.size(self.alg.super_pore_conductance[1]) == 7
assert np.size(self.alg.super_pore_conductance[2]) == 1
def test_source_term_modes(self):
self.phys['pore.item1'] = 0.5e-12
self.phys['pore.item2'] = 2.5
self.phys['pore.item3'] = -1.4e-11
self.phys.models.add(propname='pore.A',
model=pm.generic_source_term.power_law,
A1='pore.item1',
A2='pore.item2',
A3='pore.item3',
x='mole_fraction',
return_rate=False,
regen_mode='on_demand')
self.phys.models.add(propname='pore.B',
model=pm.generic_source_term.linear,
A1='pore.item1',
A2='pore.item3',
x='mole_fraction',
return_rate=False,
regen_mode='on_demand')
S1_pores = np.arange(25, 35)
self.alg.set_source_term(source_name=['pore.A', 'pore.B'],
pores=S1_pores)
mask1 = ~np.isnan(self.alg['pore.source_nonlinear_s1_A'])
mask2 = ~np.isnan(self.alg['pore.source_nonlinear_s2_A'])
assert np.all(self.alg.Ps[mask1] == S1_pores)
assert np.all(self.alg.Ps[mask2] == S1_pores)
self.alg.set_source_term(source_name='pore.A',
pores=[26], x0=np.ones(self.phys.Np),
mode='update')
assert self.alg['pore.source_nonlinear_s1_A'][26] == 1.25e-12
S2_pores = np.array([30, 31])
self.alg.set_source_term(source_name='pore.A',
pores=S2_pores,
mode='overwrite')
mask1 = ~np.isnan(self.alg['pore.source_nonlinear_s1_A'])
assert np.all(self.alg.Ps[mask1] == S2_pores)
self.alg.set_source_term(source_name='pore.B',
pores=S1_pores,
mode='remove')
mask1 = np.isnan(self.alg['pore.source_nonlinear_s1_B'])
assert np.all(self.alg.Ps[mask1] == self.alg.Ps)
self.alg.set_source_term(source_name=['pore.A', 'pore.B'],
pores=self.alg.Ps,
mode='remove')
assert ('pore.source_B' in self.alg.labels())
assert ('pore.source_A' in self.alg.labels())
self.alg.set_source_term(source_name=['pore.A', 'pore.B'],
mode='remove')
assert ('pore.source_B' not in self.alg.labels())
assert ('pore.source_A' not in self.alg.labels())
| [
"OpenPNM.Physics.GenericPhysics",
"numpy.ones",
"OpenPNM.Algorithms.GenericLinearTransport",
"numpy.absolute",
"numpy.size",
"OpenPNM.Network.Cubic",
"numpy.sum",
"numpy.zeros",
"OpenPNM.Phases.GenericPhase",
"numpy.array",
"numpy.isnan",
"numpy.concatenate",
"numpy.all",
"numpy.arange"
]
| [((152, 190), 'OpenPNM.Network.Cubic', 'OpenPNM.Network.Cubic', ([], {'shape': '[5, 5, 5]'}), '(shape=[5, 5, 5])\n', (173, 190), False, 'import OpenPNM\n'), ((212, 257), 'OpenPNM.Phases.GenericPhase', 'OpenPNM.Phases.GenericPhase', ([], {'network': 'self.net'}), '(network=self.net)\n', (239, 257), False, 'import OpenPNM\n'), ((328, 420), 'OpenPNM.Physics.GenericPhysics', 'OpenPNM.Physics.GenericPhysics', ([], {'network': 'self.net', 'phase': 'self.phase', 'pores': 'Ps', 'throats': 'Ts'}), '(network=self.net, phase=self.phase, pores=Ps,\n throats=Ts)\n', (358, 420), False, 'import OpenPNM\n'), ((578, 655), 'OpenPNM.Algorithms.GenericLinearTransport', 'OpenPNM.Algorithms.GenericLinearTransport', ([], {'network': 'self.net', 'phase': 'self.phase'}), '(network=self.net, phase=self.phase)\n', (619, 655), False, 'import OpenPNM\n'), ((777, 794), 'numpy.arange', 'np.arange', (['(25)', '(35)'], {}), '(25, 35)\n', (786, 794), True, 'import numpy as np\n'), ((1032, 1058), 'numpy.all', 'np.all', (['(ptest == BC1_pores)'], {}), '(ptest == BC1_pores)\n', (1038, 1058), True, 'import numpy as np\n'), ((1079, 1096), 'numpy.arange', 'np.arange', (['(43)', '(50)'], {}), '(43, 50)\n', (1088, 1096), True, 'import numpy as np\n'), ((1465, 1480), 'numpy.arange', 'np.arange', (['(4)', '(9)'], {}), '(4, 9)\n', (1474, 1480), True, 'import numpy as np\n'), ((1777, 1803), 'numpy.all', 'np.all', (['(ptest == BC3_pores)'], {}), '(ptest == BC3_pores)\n', (1783, 1803), True, 'import numpy as np\n'), ((2125, 2151), 'numpy.all', 'np.all', (['(ptest == BC4_pores)'], {}), '(ptest == BC4_pores)\n', (2131, 2151), True, 'import numpy as np\n'), ((2564, 2598), 'numpy.sum', 'np.sum', (["self.alg['pore.Dirichlet']"], {}), "(self.alg['pore.Dirichlet'])\n", (2570, 2598), True, 'import numpy as np\n'), ((2880, 2897), 'numpy.arange', 'np.arange', (['(25)', '(35)'], {}), '(25, 35)\n', (2889, 2897), True, 'import numpy as np\n'), ((3144, 3173), 'numpy.all', 'np.all', (['(t_test == BC1_throats)'], {}), '(t_test == BC1_throats)\n', (3150, 3173), True, 'import numpy as np\n'), ((3196, 3213), 'numpy.arange', 'np.arange', (['(43)', '(50)'], {}), '(43, 50)\n', (3205, 3213), True, 'import numpy as np\n'), ((3598, 3613), 'numpy.arange', 'np.arange', (['(4)', '(9)'], {}), '(4, 9)\n', (3607, 3613), True, 'import numpy as np\n'), ((3919, 3948), 'numpy.all', 'np.all', (['(t_test == BC3_throats)'], {}), '(t_test == BC3_throats)\n', (3925, 3948), True, 'import numpy as np\n'), ((4281, 4310), 'numpy.all', 'np.all', (['(t_test == BC4_throats)'], {}), '(t_test == BC4_throats)\n', (4287, 4310), True, 'import numpy as np\n'), ((4734, 4770), 'numpy.sum', 'np.sum', (["self.alg['throat.Dirichlet']"], {}), "(self.alg['throat.Dirichlet'])\n", (4740, 4770), True, 'import numpy as np\n'), ((5069, 5104), 'numpy.zeros', 'np.zeros', (['self.alg.Np'], {'dtype': '"""bool"""'}), "(self.alg.Np, dtype='bool')\n", (5077, 5104), True, 'import numpy as np\n'), ((5460, 5495), 'numpy.zeros', 'np.zeros', (['self.alg.Np'], {'dtype': '"""bool"""'}), "(self.alg.Np, dtype='bool')\n", (5468, 5495), True, 'import numpy as np\n'), ((5994, 6029), 'numpy.zeros', 'np.zeros', (['self.alg.Np'], {'dtype': '"""bool"""'}), "(self.alg.Np, dtype='bool')\n", (6002, 6029), True, 'import numpy as np\n'), ((6442, 6477), 'numpy.zeros', 'np.zeros', (['self.alg.Np'], {'dtype': '"""bool"""'}), "(self.alg.Np, dtype='bool')\n", (6450, 6477), True, 'import numpy as np\n'), ((7328, 7362), 'numpy.sum', 'np.sum', (["self.alg['pore.Dirichlet']"], {}), "(self.alg['pore.Dirichlet'])\n", (7334, 7362), True, 
'import numpy as np\n'), ((7663, 7698), 'numpy.zeros', 'np.zeros', (['self.alg.Nt'], {'dtype': '"""bool"""'}), "(self.alg.Nt, dtype='bool')\n", (7671, 7698), True, 'import numpy as np\n'), ((8070, 8105), 'numpy.zeros', 'np.zeros', (['self.alg.Nt'], {'dtype': '"""bool"""'}), "(self.alg.Nt, dtype='bool')\n", (8078, 8105), True, 'import numpy as np\n'), ((8622, 8657), 'numpy.zeros', 'np.zeros', (['self.alg.Nt'], {'dtype': '"""bool"""'}), "(self.alg.Nt, dtype='bool')\n", (8630, 8657), True, 'import numpy as np\n'), ((9086, 9121), 'numpy.zeros', 'np.zeros', (['self.alg.Nt'], {'dtype': '"""bool"""'}), "(self.alg.Nt, dtype='bool')\n", (9094, 9121), True, 'import numpy as np\n'), ((9997, 10033), 'numpy.sum', 'np.sum', (["self.alg['throat.Dirichlet']"], {}), "(self.alg['throat.Dirichlet'])\n", (10003, 10033), True, 'import numpy as np\n'), ((10336, 10353), 'numpy.arange', 'np.arange', (['(20)', '(30)'], {}), '(20, 30)\n', (10345, 10353), True, 'import numpy as np\n'), ((10547, 10564), 'numpy.arange', 'np.arange', (['(45)', '(66)'], {}), '(45, 66)\n', (10556, 10564), True, 'import numpy as np\n'), ((10796, 10813), 'numpy.arange', 'np.arange', (['(87)', '(94)'], {}), '(87, 94)\n', (10805, 10813), True, 'import numpy as np\n'), ((11074, 11089), 'numpy.arange', 'np.arange', (['(3)', '(7)'], {}), '(3, 7)\n', (11083, 11089), True, 'import numpy as np\n'), ((12853, 12870), 'numpy.arange', 'np.arange', (['(25)', '(35)'], {}), '(25, 35)\n', (12862, 12870), True, 'import numpy as np\n'), ((13134, 13172), 'numpy.all', 'np.all', (['(self.alg.Ps[mask1] == S1_pores)'], {}), '(self.alg.Ps[mask1] == S1_pores)\n', (13140, 13172), True, 'import numpy as np\n'), ((13188, 13226), 'numpy.all', 'np.all', (['(self.alg.Ps[mask2] == S1_pores)'], {}), '(self.alg.Ps[mask2] == S1_pores)\n', (13194, 13226), True, 'import numpy as np\n'), ((13490, 13508), 'numpy.array', 'np.array', (['[30, 31]'], {}), '([30, 31])\n', (13498, 13508), True, 'import numpy as np\n'), ((13745, 13783), 'numpy.all', 'np.all', (['(self.alg.Ps[mask1] == S2_pores)'], {}), '(self.alg.Ps[mask1] == S2_pores)\n', (13751, 13783), True, 'import numpy as np\n'), ((13952, 14000), 'numpy.isnan', 'np.isnan', (["self.alg['pore.source_nonlinear_s1_B']"], {}), "(self.alg['pore.source_nonlinear_s1_B'])\n", (13960, 14000), True, 'import numpy as np\n'), ((14016, 14057), 'numpy.all', 'np.all', (['(self.alg.Ps[mask1] == self.alg.Ps)'], {}), '(self.alg.Ps[mask1] == self.alg.Ps)\n', (14022, 14057), True, 'import numpy as np\n'), ((5123, 5140), 'numpy.arange', 'np.arange', (['(25)', '(35)'], {}), '(25, 35)\n', (5132, 5140), True, 'import numpy as np\n'), ((5514, 5531), 'numpy.arange', 'np.arange', (['(43)', '(50)'], {}), '(43, 50)\n', (5523, 5531), True, 'import numpy as np\n'), ((6048, 6063), 'numpy.arange', 'np.arange', (['(4)', '(9)'], {}), '(4, 9)\n', (6057, 6063), True, 'import numpy as np\n'), ((7719, 7736), 'numpy.arange', 'np.arange', (['(25)', '(35)'], {}), '(25, 35)\n', (7728, 7736), True, 'import numpy as np\n'), ((8126, 8143), 'numpy.arange', 'np.arange', (['(43)', '(50)'], {}), '(43, 50)\n', (8135, 8143), True, 'import numpy as np\n'), ((8678, 8693), 'numpy.arange', 'np.arange', (['(4)', '(9)'], {}), '(4, 9)\n', (8687, 8693), True, 'import numpy as np\n'), ((11294, 11314), 'numpy.array', 'np.array', (['[6.42e-13]'], {}), '([6.42e-13])\n', (11302, 11314), True, 'import numpy as np\n'), ((11682, 11712), 'numpy.absolute', 'np.absolute', (['(r1 + r2 + r3 + r4)'], {}), '(r1 + r2 + r3 + r4)\n', (11693, 11712), True, 'import numpy as np\n'), ((11736, 11779), 
'numpy.size', 'np.size', (['self.alg.super_pore_conductance[0]'], {}), '(self.alg.super_pore_conductance[0])\n', (11743, 11779), True, 'import numpy as np\n'), ((11800, 11843), 'numpy.size', 'np.size', (['self.alg.super_pore_conductance[1]'], {}), '(self.alg.super_pore_conductance[1])\n', (11807, 11843), True, 'import numpy as np\n'), ((11864, 11907), 'numpy.size', 'np.size', (['self.alg.super_pore_conductance[2]'], {}), '(self.alg.super_pore_conductance[2])\n', (11871, 11907), True, 'import numpy as np\n'), ((13004, 13052), 'numpy.isnan', 'np.isnan', (["self.alg['pore.source_nonlinear_s1_A']"], {}), "(self.alg['pore.source_nonlinear_s1_A'])\n", (13012, 13052), True, 'import numpy as np\n'), ((13070, 13118), 'numpy.isnan', 'np.isnan', (["self.alg['pore.source_nonlinear_s2_A']"], {}), "(self.alg['pore.source_nonlinear_s2_A'])\n", (13078, 13118), True, 'import numpy as np\n'), ((13681, 13729), 'numpy.isnan', 'np.isnan', (["self.alg['pore.source_nonlinear_s1_A']"], {}), "(self.alg['pore.source_nonlinear_s1_A'])\n", (13689, 13729), True, 'import numpy as np\n'), ((1405, 1443), 'numpy.concatenate', 'np.concatenate', (['(BC1_pores, BC2_pores)'], {}), '((BC1_pores, BC2_pores))\n', (1419, 1443), True, 'import numpy as np\n'), ((3532, 3574), 'numpy.concatenate', 'np.concatenate', (['(BC1_throats, BC2_throats)'], {}), '((BC1_throats, BC2_throats))\n', (3546, 3574), True, 'import numpy as np\n'), ((5948, 5972), 'numpy.concatenate', 'np.concatenate', (['(B1, B2)'], {}), '((B1, B2))\n', (5962, 5972), True, 'import numpy as np\n'), ((8574, 8598), 'numpy.concatenate', 'np.concatenate', (['(B1, B2)'], {}), '((B1, B2))\n', (8588, 8598), True, 'import numpy as np\n'), ((13330, 13351), 'numpy.ones', 'np.ones', (['self.phys.Np'], {}), '(self.phys.Np)\n', (13337, 13351), True, 'import numpy as np\n')] |
from optimizer.utils.intbounds import IntBounds
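# Unit tests for IntBounds: deriving new integer bounds via make_gt/make_lt/make_ge/make_le.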
class TestIntBounds(object):
def test_make_gt(self):
i0 = IntBounds()
i1 = i0.make_gt(IntBounds(10, 10))
assert i1.lower == 11
def test_make_gt_already_bounded(self):
i0 = IntBounds()
i1 = i0.make_gt(IntBounds(10, 10)).make_gt(IntBounds(0, 0))
assert i1.lower == 11
def test_make_lt(self):
i0 = IntBounds()
i1 = i0.make_lt(IntBounds(10, 10))
assert i1.upper == 9
def test_make_lt_already_bounded(self):
i0 = IntBounds()
i1 = i0.make_lt(IntBounds(0, 0)).make_lt(IntBounds(10, 10))
assert i1.upper == -1
def test_both_bounds(self):
i0 = IntBounds()
i1 = i0.make_lt(IntBounds(10, 10)).make_gt(IntBounds(0, 0))
assert i1.upper == 9
assert i1.lower == 1
i2 = i0.make_gt(IntBounds(0, 0)).make_lt(IntBounds(10, 10))
assert i2.lower == 1
assert i2.upper == 9
def test_make_le_already_bounded(self):
i0 = IntBounds()
i1 = i0.make_le(IntBounds(0, 0)).make_le(IntBounds(2, 2))
assert i1.upper == 0
def test_make_ge_already_bounded(self):
i0 = IntBounds()
i1 = i0.make_ge(IntBounds(10, 10)).make_ge(IntBounds(0, 0))
assert i1.lower == 10
| [
"optimizer.utils.intbounds.IntBounds"
]
| [((120, 131), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', ([], {}), '()\n', (129, 131), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((265, 276), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', ([], {}), '()\n', (274, 276), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((419, 430), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', ([], {}), '()\n', (428, 430), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((563, 574), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', ([], {}), '()\n', (572, 574), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((721, 732), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', ([], {}), '()\n', (730, 732), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((1047, 1058), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', ([], {}), '()\n', (1056, 1058), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((1213, 1224), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', ([], {}), '()\n', (1222, 1224), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((157, 174), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(10)', '(10)'], {}), '(10, 10)\n', (166, 174), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((329, 344), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(0)', '(0)'], {}), '(0, 0)\n', (338, 344), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((456, 473), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(10)', '(10)'], {}), '(10, 10)\n', (465, 473), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((625, 642), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(10)', '(10)'], {}), '(10, 10)\n', (634, 642), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((785, 800), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(0)', '(0)'], {}), '(0, 0)\n', (794, 800), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((911, 928), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(10)', '(10)'], {}), '(10, 10)\n', (920, 928), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((1108, 1123), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(2)', '(2)'], {}), '(2, 2)\n', (1117, 1123), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((1276, 1291), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(0)', '(0)'], {}), '(0, 0)\n', (1285, 1291), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((302, 319), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(10)', '(10)'], {}), '(10, 10)\n', (311, 319), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((600, 615), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(0)', '(0)'], {}), '(0, 0)\n', (609, 615), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((758, 775), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(10)', '(10)'], {}), '(10, 10)\n', (767, 775), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((886, 901), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(0)', '(0)'], {}), '(0, 0)\n', (895, 901), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((1083, 1098), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(0)', '(0)'], {}), '(0, 0)\n', (1092, 1098), False, 'from optimizer.utils.intbounds import IntBounds\n'), ((1249, 1266), 'optimizer.utils.intbounds.IntBounds', 'IntBounds', (['(10)', '(10)'], {}), '(10, 10)\n', (1258, 1266), False, 'from optimizer.utils.intbounds import IntBounds\n')] 
|
from libTask import Queue
from common import configParams
from common import common
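# Builds the general-detection and smoke-detection task queues from the settings in config.json.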
def main():
cp = configParams.ConfigParams("config.json")
detectGeneralQueue = Queue.DQueue(cp, len(cp.detect_general_ids), cp.modelPath, common.GENERALDETECT_METHOD_ID,
cp.GPUDevices, cp.detect_general_ids)
print("Run Into Next step")
smokeQueue = Queue.DQueue(cp, len(cp.smoke_ids), cp.modelPath, common.PEOPLESMOKE_METHOD_ID,cp.GPUDevices, cp.smoke_ids)
if __name__ == '__main__':
    main()
| [
"common.configParams.ConfigParams"
]
| [((105, 145), 'common.configParams.ConfigParams', 'configParams.ConfigParams', (['"""config.json"""'], {}), "('config.json')\n", (130, 145), False, 'from common import configParams\n')] |
import torch, torchvision
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
import argparse, time
def parse_args():
p = argparse.ArgumentParser()
p.add_argument("-i", "--image", type=str, help="Path to image to segment")
p.add_argument("-m", "--model", type=str, help="Model to use", default="COCO-InstanceSegmentation/mask_cascade_rcnn_ResNeSt_200_FPN_syncBN_all_tricks_3x.yaml")
p.add_argument("-t", "--threshold", type=float, help="Threshold for model detections", default=0.4)
p.add_argument("-rs", "--use_resnest", type=bool, help="Whether the selected model uses ResNeSt backbone or no", default=True)
return p.parse_args()
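# Run instance segmentation on the given image with the selected model and save the visualization as output.jpg.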
def start_segment(args):
img = args.image
model = args.model
thresh = args.threshold
use_resnest = args.use_resnest
im = cv2.imread(img)
# get default cfg file
cfg = get_cfg()
# replace cfg from specific model yaml file
cfg.merge_from_file(model_zoo.get_config_file(model))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = thresh # set threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model, resnest=use_resnest)
predictor = DefaultPredictor(cfg)
start = time.time()
outputs = predictor(im)
print("Time eplased: {}".format(time.time() - start))
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2) #rgb image (::-1)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2.imwrite("output.jpg", out.get_image()[:, :, ::-1])
if __name__ == "__main__":
args = parse_args()
    start_segment(args)
| [
"detectron2.config.get_cfg",
"argparse.ArgumentParser",
"detectron2.model_zoo.get_checkpoint_url",
"detectron2.model_zoo.get_config_file",
"detectron2.data.MetadataCatalog.get",
"time.time",
"detectron2.engine.DefaultPredictor",
"cv2.imread",
"detectron2.utils.logger.setup_logger"
]
| [((98, 112), 'detectron2.utils.logger.setup_logger', 'setup_logger', ([], {}), '()\n', (110, 112), False, 'from detectron2.utils.logger import setup_logger\n'), ((531, 556), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (554, 556), False, 'import argparse, time\n'), ((1217, 1232), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (1227, 1232), False, 'import os, json, cv2, random\n'), ((1274, 1283), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (1281, 1283), False, 'from detectron2.config import get_cfg\n'), ((1607, 1663), 'detectron2.model_zoo.get_checkpoint_url', 'model_zoo.get_checkpoint_url', (['model'], {'resnest': 'use_resnest'}), '(model, resnest=use_resnest)\n', (1635, 1663), False, 'from detectron2 import model_zoo\n'), ((1681, 1702), 'detectron2.engine.DefaultPredictor', 'DefaultPredictor', (['cfg'], {}), '(cfg)\n', (1697, 1702), False, 'from detectron2.engine import DefaultPredictor\n'), ((1716, 1727), 'time.time', 'time.time', ([], {}), '()\n', (1725, 1727), False, 'import argparse, time\n'), ((1358, 1390), 'detectron2.model_zoo.get_config_file', 'model_zoo.get_config_file', (['model'], {}), '(model)\n', (1383, 1390), False, 'from detectron2 import model_zoo\n'), ((1852, 1894), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['cfg.DATASETS.TRAIN[0]'], {}), '(cfg.DATASETS.TRAIN[0])\n', (1871, 1894), False, 'from detectron2.data import MetadataCatalog, DatasetCatalog\n'), ((1794, 1805), 'time.time', 'time.time', ([], {}), '()\n', (1803, 1805), False, 'import argparse, time\n')] |
"""AMQP Table Encoding/Decoding"""
import struct
import decimal
import calendar
from datetime import datetime
from pika import exceptions
from pika.compat import unicode_type, PY2, long, as_bytes
def encode_short_string(pieces, value):
"""Encode a string value as short string and append it to pieces list
returning the size of the encoded value.
:param list pieces: Already encoded values
:param value: String value to encode
:type value: str or unicode
:rtype: int
"""
encoded_value = as_bytes(value)
length = len(encoded_value)
# 4.2.5.3
# Short strings, stored as an 8-bit unsigned integer length followed by zero
# or more octets of data. Short strings can carry up to 255 octets of UTF-8
# data, but may not contain binary zero octets.
# ...
# 4.2.5.5
# The server SHOULD validate field names and upon receiving an invalid field
# name, it SHOULD signal a connection exception with reply code 503 (syntax
# error).
# -> validate length (avoid truncated utf-8 / corrupted data), but skip null
# byte check.
if length > 255:
raise exceptions.ShortStringTooLong(encoded_value)
pieces.append(struct.pack('B', length))
pieces.append(encoded_value)
return 1 + length
if PY2:
def decode_short_string(encoded, offset):
"""Decode a short string value from ``encoded`` data at ``offset``.
"""
length = struct.unpack_from('B', encoded, offset)[0]
offset += 1
        # Purely for compatibility with the original Python 2 code. It is not
        # clear what this conversion does or why it is needed.
value = encoded[offset:offset + length]
try:
value = bytes(value)
except UnicodeEncodeError:
pass
offset += length
return value, offset
else:
def decode_short_string(encoded, offset):
"""Decode a short string value from ``encoded`` data at ``offset``.
"""
length = struct.unpack_from('B', encoded, offset)[0]
offset += 1
value = encoded[offset:offset + length].decode('utf8')
offset += length
return value, offset
def encode_table(pieces, table):
"""Encode a dict as an AMQP table appending the encded table to the
pieces list passed in.
:param list pieces: Already encoded frame pieces
:param dict table: The dict to encode
:rtype: int
"""
table = table or {}
length_index = len(pieces)
pieces.append(None) # placeholder
tablesize = 0
for (key, value) in table.items():
tablesize += encode_short_string(pieces, key)
tablesize += encode_value(pieces, value)
pieces[length_index] = struct.pack('>I', tablesize)
return tablesize + 4
def encode_value(pieces, value):
"""Encode the value passed in and append it to the pieces list returning
the the size of the encoded value.
:param list pieces: Already encoded values
:param any value: The value to encode
:rtype: int
"""
if PY2:
if isinstance(value, basestring):
if isinstance(value, unicode_type):
value = value.encode('utf-8')
pieces.append(struct.pack('>cI', b'S', len(value)))
pieces.append(value)
return 5 + len(value)
else:
# support only str on Python 3
if isinstance(value, str):
value = value.encode('utf-8')
pieces.append(struct.pack('>cI', b'S', len(value)))
pieces.append(value)
return 5 + len(value)
if isinstance(value, bool):
pieces.append(struct.pack('>cB', b't', int(value)))
return 2
if isinstance(value, long):
pieces.append(struct.pack('>cq', b'l', value))
return 9
elif isinstance(value, int):
pieces.append(struct.pack('>ci', b'I', value))
return 5
elif isinstance(value, decimal.Decimal):
value = value.normalize()
if value.as_tuple().exponent < 0:
decimals = -value.as_tuple().exponent
raw = int(value * (decimal.Decimal(10) ** decimals))
pieces.append(struct.pack('>cBi', b'D', decimals, raw))
else:
# per spec, the "decimals" octet is unsigned (!)
pieces.append(struct.pack('>cBi', b'D', 0, int(value)))
return 6
elif isinstance(value, datetime):
pieces.append(struct.pack('>cQ', b'T',
calendar.timegm(value.utctimetuple())))
return 9
elif isinstance(value, dict):
pieces.append(struct.pack('>c', b'F'))
return 1 + encode_table(pieces, value)
elif isinstance(value, list):
p = []
for v in value:
encode_value(p, v)
piece = b''.join(p)
pieces.append(struct.pack('>cI', b'A', len(piece)))
pieces.append(piece)
return 5 + len(piece)
elif value is None:
pieces.append(struct.pack('>c', b'V'))
return 1
else:
raise exceptions.UnsupportedAMQPFieldException(pieces, value)
def decode_table(encoded, offset):
"""Decode the AMQP table passed in from the encoded value returning the
decoded result and the number of bytes read plus the offset.
:param str encoded: The binary encoded data to decode
:param int offset: The starting byte offset
:rtype: tuple
"""
result = {}
tablesize = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
limit = offset + tablesize
while offset < limit:
key, offset = decode_short_string(encoded, offset)
value, offset = decode_value(encoded, offset)
result[key] = value
return result, offset
def decode_value(encoded, offset):
"""Decode the value passed in returning the decoded value and the number
of bytes read in addition to the starting offset.
:param str encoded: The binary encoded data to decode
:param int offset: The starting byte offset
:rtype: tuple
:raises: pika.exceptions.InvalidFieldTypeException
"""
# slice to get bytes in Python 3 and str in Python 2
kind = encoded[offset:offset + 1]
offset += 1
# Bool
if kind == b't':
value = struct.unpack_from('>B', encoded, offset)[0]
value = bool(value)
offset += 1
# Short-Short Int
elif kind == b'b':
        value = struct.unpack_from('>b', encoded, offset)[0]
offset += 1
# Short-Short Unsigned Int
elif kind == b'B':
        value = struct.unpack_from('>B', encoded, offset)[0]
offset += 1
# Short Int
elif kind == b'U':
value = struct.unpack_from('>h', encoded, offset)[0]
offset += 2
# Short Unsigned Int
elif kind == b'u':
value = struct.unpack_from('>H', encoded, offset)[0]
offset += 2
# Long Int
elif kind == b'I':
value = struct.unpack_from('>i', encoded, offset)[0]
offset += 4
# Long Unsigned Int
elif kind == b'i':
value = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
# Long-Long Int
elif kind == b'L':
value = long(struct.unpack_from('>q', encoded, offset)[0])
offset += 8
# Long-Long Unsigned Int
elif kind == b'l':
value = long(struct.unpack_from('>Q', encoded, offset)[0])
offset += 8
# Float
elif kind == b'f':
        value = struct.unpack_from('>f', encoded, offset)[0]
offset += 4
# Double
elif kind == b'd':
        value = struct.unpack_from('>d', encoded, offset)[0]
offset += 8
# Decimal
elif kind == b'D':
decimals = struct.unpack_from('B', encoded, offset)[0]
offset += 1
raw = struct.unpack_from('>i', encoded, offset)[0]
offset += 4
value = decimal.Decimal(raw) * (decimal.Decimal(10) ** -decimals)
# Short String
elif kind == b's':
value, offset = decode_short_string(encoded, offset)
# Long String
elif kind == b'S':
length = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
value = encoded[offset:offset + length].decode('utf8')
offset += length
# Field Array
elif kind == b'A':
length = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
offset_end = offset + length
value = []
while offset < offset_end:
v, offset = decode_value(encoded, offset)
value.append(v)
# Timestamp
elif kind == b'T':
value = datetime.utcfromtimestamp(struct.unpack_from('>Q', encoded,
offset)[0])
offset += 8
# Field Table
elif kind == b'F':
(value, offset) = decode_table(encoded, offset)
# Null / Void
elif kind == b'V':
value = None
else:
raise exceptions.InvalidFieldTypeException(kind)
return value, offset
| [
"pika.exceptions.ShortStringTooLong",
"pika.exceptions.InvalidFieldTypeException",
"pika.compat.as_bytes",
"struct.pack",
"pika.exceptions.UnsupportedAMQPFieldException",
"decimal.Decimal",
"struct.unpack_from"
]
| [((524, 539), 'pika.compat.as_bytes', 'as_bytes', (['value'], {}), '(value)\n', (532, 539), False, 'from pika.compat import unicode_type, PY2, long, as_bytes\n'), ((2684, 2712), 'struct.pack', 'struct.pack', (['""">I"""', 'tablesize'], {}), "('>I', tablesize)\n", (2695, 2712), False, 'import struct\n'), ((1133, 1177), 'pika.exceptions.ShortStringTooLong', 'exceptions.ShortStringTooLong', (['encoded_value'], {}), '(encoded_value)\n', (1162, 1177), False, 'from pika import exceptions\n'), ((1197, 1221), 'struct.pack', 'struct.pack', (['"""B"""', 'length'], {}), "('B', length)\n", (1208, 1221), False, 'import struct\n'), ((5390, 5431), 'struct.unpack_from', 'struct.unpack_from', (['""">I"""', 'encoded', 'offset'], {}), "('>I', encoded, offset)\n", (5408, 5431), False, 'import struct\n'), ((1439, 1479), 'struct.unpack_from', 'struct.unpack_from', (['"""B"""', 'encoded', 'offset'], {}), "('B', encoded, offset)\n", (1457, 1479), False, 'import struct\n'), ((1966, 2006), 'struct.unpack_from', 'struct.unpack_from', (['"""B"""', 'encoded', 'offset'], {}), "('B', encoded, offset)\n", (1984, 2006), False, 'import struct\n'), ((3704, 3735), 'struct.pack', 'struct.pack', (['""">cq"""', "b'l'", 'value'], {}), "('>cq', b'l', value)\n", (3715, 3735), False, 'import struct\n'), ((6192, 6233), 'struct.unpack_from', 'struct.unpack_from', (['""">B"""', 'encoded', 'offset'], {}), "('>B', encoded, offset)\n", (6210, 6233), False, 'import struct\n'), ((3809, 3840), 'struct.pack', 'struct.pack', (['""">ci"""', "b'I'", 'value'], {}), "('>ci', b'I', value)\n", (3820, 3840), False, 'import struct\n'), ((6347, 6388), 'struct.unpack_from', 'struct.unpack_from', (['""">B"""', 'encoded', 'offset'], {}), "('>B', encoded, offset)\n", (6365, 6388), False, 'import struct\n'), ((6483, 6524), 'struct.unpack_from', 'struct.unpack_from', (['""">b"""', 'encoded', 'offset'], {}), "('>b', encoded, offset)\n", (6501, 6524), False, 'import struct\n'), ((4121, 4161), 'struct.pack', 'struct.pack', (['""">cBi"""', "b'D'", 'decimals', 'raw'], {}), "('>cBi', b'D', decimals, raw)\n", (4132, 4161), False, 'import struct\n'), ((6604, 6645), 'struct.unpack_from', 'struct.unpack_from', (['""">h"""', 'encoded', 'offset'], {}), "('>h', encoded, offset)\n", (6622, 6645), False, 'import struct\n'), ((4555, 4578), 'struct.pack', 'struct.pack', (['""">c"""', "b'F'"], {}), "('>c', b'F')\n", (4566, 4578), False, 'import struct\n'), ((6734, 6775), 'struct.unpack_from', 'struct.unpack_from', (['""">H"""', 'encoded', 'offset'], {}), "('>H', encoded, offset)\n", (6752, 6775), False, 'import struct\n'), ((4061, 4080), 'decimal.Decimal', 'decimal.Decimal', (['(10)'], {}), '(10)\n', (4076, 4080), False, 'import decimal\n'), ((4990, 5045), 'pika.exceptions.UnsupportedAMQPFieldException', 'exceptions.UnsupportedAMQPFieldException', (['pieces', 'value'], {}), '(pieces, value)\n', (5030, 5045), False, 'from pika import exceptions\n'), ((6854, 6895), 'struct.unpack_from', 'struct.unpack_from', (['""">i"""', 'encoded', 'offset'], {}), "('>i', encoded, offset)\n", (6872, 6895), False, 'import struct\n'), ((4924, 4947), 'struct.pack', 'struct.pack', (['""">c"""', "b'V'"], {}), "('>c', b'V')\n", (4935, 4947), False, 'import struct\n'), ((6983, 7024), 'struct.unpack_from', 'struct.unpack_from', (['""">I"""', 'encoded', 'offset'], {}), "('>I', encoded, offset)\n", (7001, 7024), False, 'import struct\n'), ((7113, 7154), 'struct.unpack_from', 'struct.unpack_from', (['""">q"""', 'encoded', 'offset'], {}), "('>q', encoded, offset)\n", (7131, 7154), False, 'import 
struct\n'), ((7253, 7294), 'struct.unpack_from', 'struct.unpack_from', (['""">Q"""', 'encoded', 'offset'], {}), "('>Q', encoded, offset)\n", (7271, 7294), False, 'import struct\n'), ((7376, 7417), 'struct.unpack_from', 'struct.unpack_from', (['""">f"""', 'encoded', 'offset'], {}), "('>f', encoded, offset)\n", (7394, 7417), False, 'import struct\n'), ((7500, 7541), 'struct.unpack_from', 'struct.unpack_from', (['""">d"""', 'encoded', 'offset'], {}), "('>d', encoded, offset)\n", (7518, 7541), False, 'import struct\n'), ((7623, 7663), 'struct.unpack_from', 'struct.unpack_from', (['"""B"""', 'encoded', 'offset'], {}), "('B', encoded, offset)\n", (7641, 7663), False, 'import struct\n'), ((7701, 7742), 'struct.unpack_from', 'struct.unpack_from', (['""">i"""', 'encoded', 'offset'], {}), "('>i', encoded, offset)\n", (7719, 7742), False, 'import struct\n'), ((7782, 7802), 'decimal.Decimal', 'decimal.Decimal', (['raw'], {}), '(raw)\n', (7797, 7802), False, 'import decimal\n'), ((7806, 7825), 'decimal.Decimal', 'decimal.Decimal', (['(10)'], {}), '(10)\n', (7821, 7825), False, 'import decimal\n'), ((8003, 8044), 'struct.unpack_from', 'struct.unpack_from', (['""">I"""', 'encoded', 'offset'], {}), "('>I', encoded, offset)\n", (8021, 8044), False, 'import struct\n'), ((8215, 8256), 'struct.unpack_from', 'struct.unpack_from', (['""">I"""', 'encoded', 'offset'], {}), "('>I', encoded, offset)\n", (8233, 8256), False, 'import struct\n'), ((8535, 8576), 'struct.unpack_from', 'struct.unpack_from', (['""">Q"""', 'encoded', 'offset'], {}), "('>Q', encoded, offset)\n", (8553, 8576), False, 'import struct\n'), ((8847, 8889), 'pika.exceptions.InvalidFieldTypeException', 'exceptions.InvalidFieldTypeException', (['kind'], {}), '(kind)\n', (8883, 8889), False, 'from pika import exceptions\n')] |
from pylaas_core.abstract.abstract_service import AbstractService
import time
from pylaas_core.interface.technical.container_configurable_aware_interface import ContainerConfigurableAwareInterface
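# Dummy service implementing the container-configurable-aware interface; it records a creation timestamp (ms) and stores configurations passed to set_configs().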
class DummyConfigurable(AbstractService, ContainerConfigurableAwareInterface):
def __init__(self) -> None:
super().__init__()
self._microtime = int(round(time.time() * 1000))
self._configs = None
def set_configs(self, configurations):
self._configs = configurations
return self
| [
"time.time"
]
| [((375, 386), 'time.time', 'time.time', ([], {}), '()\n', (384, 386), False, 'import time\n')] |
from django.urls import reverse_lazy, reverse
from django.utils.decorators import method_decorator
from django.views.generic import ListView, DetailView, CreateView, DeleteView, UpdateView
from .models import BlogPost
from django.contrib.auth.decorators import login_required
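# Class-based views for BlogPost: public list/detail views plus login-protected create, update and delete views.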
class BlogPostHomeView(ListView):
model = BlogPost
context_object_name = "posts"
class BlogPostDetailsView(DetailView):
model = BlogPost
context_object_name = "post"
@method_decorator(login_required, name='dispatch')
class BlogPostCreateView(CreateView):
model = BlogPost
fields = ['title', 'image','author', 'category', 'content']
def get_success_url(self):
return reverse('posts:home')
@method_decorator(login_required, name='dispatch')
class BlogPostUpdateView(UpdateView):
model = BlogPost
fields = ['title', 'author', 'category', 'content']
template_name = 'blog/blogpost_update.html'
@method_decorator(login_required, name='dispatch')
class BlogPostDeleteView(DeleteView):
model = BlogPost
    success_url = reverse_lazy('posts:home')
| [
"django.urls.reverse",
"django.utils.decorators.method_decorator",
"django.urls.reverse_lazy"
]
| [((464, 513), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (480, 513), False, 'from django.utils.decorators import method_decorator\n'), ((708, 757), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (724, 757), False, 'from django.utils.decorators import method_decorator\n'), ((923, 972), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (939, 972), False, 'from django.utils.decorators import method_decorator\n'), ((1050, 1076), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""posts:home"""'], {}), "('posts:home')\n", (1062, 1076), False, 'from django.urls import reverse_lazy, reverse\n'), ((684, 705), 'django.urls.reverse', 'reverse', (['"""posts:home"""'], {}), "('posts:home')\n", (691, 705), False, 'from django.urls import reverse_lazy, reverse\n')] |
# Copyright (c) 2016 <NAME>, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import mock
from manila.common import constants
from manila import context
from manila import db
from manila import exception
from manila.share import snapshot_access
from manila import test
from manila.tests import db_utils
from manila import utils
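# Unit tests for ShareSnapshotInstanceAccess.update_access_rules covering apply, deny, delete-all and error paths.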
@ddt.ddt
class SnapshotAccessTestCase(test.TestCase):
def setUp(self):
super(SnapshotAccessTestCase, self).setUp()
self.driver = self.mock_class("manila.share.driver.ShareDriver",
mock.Mock())
self.snapshot_access = snapshot_access.ShareSnapshotInstanceAccess(
db, self.driver)
self.context = context.get_admin_context()
share = db_utils.create_share()
self.snapshot = db_utils.create_snapshot(share_id=share['id'])
self.snapshot_instance = db_utils.create_snapshot_instance(
snapshot_id=self.snapshot['id'],
share_instance_id=self.snapshot['share']['instance']['id'])
@ddt.data(constants.ACCESS_STATE_QUEUED_TO_APPLY,
constants.ACCESS_STATE_QUEUED_TO_DENY)
def test_update_access_rules(self, state):
rules = []
for i in range(2):
rules.append({
'id': 'id-%s' % i,
'state': state,
'access_id': 'rule_id%s' % i
})
all_rules = copy.deepcopy(rules)
all_rules.append({
'id': 'id-3',
'state': constants.ACCESS_STATE_ERROR,
'access_id': 'rule_id3'
})
snapshot_instance_get = self.mock_object(
db, 'share_snapshot_instance_get',
mock.Mock(return_value=self.snapshot_instance))
snap_get_all_for_snap_instance = self.mock_object(
db, 'share_snapshot_access_get_all_for_snapshot_instance',
mock.Mock(return_value=all_rules))
self.mock_object(db, 'share_snapshot_instance_access_update')
self.mock_object(self.driver, 'snapshot_update_access')
self.mock_object(self.snapshot_access, '_check_needs_refresh',
mock.Mock(return_value=False))
self.mock_object(db, 'share_snapshot_instance_access_delete')
self.snapshot_access.update_access_rules(self.context,
self.snapshot_instance['id'])
snapshot_instance_get.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'], with_share_data=True)
snap_get_all_for_snap_instance.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'])
if state == constants.ACCESS_STATE_QUEUED_TO_APPLY:
self.driver.snapshot_update_access.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance, rules, add_rules=rules,
delete_rules=[], share_server=None)
else:
self.driver.snapshot_update_access.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance, [], add_rules=[],
delete_rules=rules, share_server=None)
def test_update_access_rules_delete_all_rules(self):
rules = []
for i in range(2):
rules.append({
'id': 'id-%s' % i,
'state': constants.ACCESS_STATE_QUEUED_TO_DENY,
'access_id': 'rule_id%s' % i
})
snapshot_instance_get = self.mock_object(
db, 'share_snapshot_instance_get',
mock.Mock(return_value=self.snapshot_instance))
snap_get_all_for_snap_instance = self.mock_object(
db, 'share_snapshot_access_get_all_for_snapshot_instance',
mock.Mock(side_effect=[rules, []]))
self.mock_object(db, 'share_snapshot_instance_access_update')
self.mock_object(self.driver, 'snapshot_update_access')
self.mock_object(db, 'share_snapshot_instance_access_delete')
self.snapshot_access.update_access_rules(self.context,
self.snapshot_instance['id'],
delete_all_rules=True)
snapshot_instance_get.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'], with_share_data=True)
snap_get_all_for_snap_instance.assert_called_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'])
self.driver.snapshot_update_access.assert_called_with(
utils.IsAMatcher(context.RequestContext), self.snapshot_instance,
[], add_rules=[], delete_rules=rules, share_server=None)
def test_update_access_rules_exception(self):
rules = []
for i in range(2):
rules.append({
'id': 'id-%s' % i,
'state': constants.ACCESS_STATE_APPLYING,
'access_id': 'rule_id%s' % i
})
snapshot_instance_get = self.mock_object(
db, 'share_snapshot_instance_get',
mock.Mock(return_value=self.snapshot_instance))
snap_get_all_for_snap_instance = self.mock_object(
db, 'share_snapshot_access_get_all_for_snapshot_instance',
mock.Mock(return_value=rules))
self.mock_object(db, 'share_snapshot_instance_access_update')
self.mock_object(self.driver, 'snapshot_update_access',
mock.Mock(side_effect=exception.NotFound))
self.assertRaises(exception.NotFound,
self.snapshot_access.update_access_rules,
self.context, self.snapshot_instance['id'])
snapshot_instance_get.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'], with_share_data=True)
snap_get_all_for_snap_instance.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'])
self.driver.snapshot_update_access.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), self.snapshot_instance,
rules, add_rules=rules, delete_rules=[], share_server=None)
| [
"manila.context.get_admin_context",
"manila.tests.db_utils.create_snapshot_instance",
"manila.share.snapshot_access.ShareSnapshotInstanceAccess",
"mock.Mock",
"manila.utils.IsAMatcher",
"manila.tests.db_utils.create_share",
"ddt.data",
"copy.deepcopy",
"manila.tests.db_utils.create_snapshot"
]
| [((1617, 1709), 'ddt.data', 'ddt.data', (['constants.ACCESS_STATE_QUEUED_TO_APPLY', 'constants.ACCESS_STATE_QUEUED_TO_DENY'], {}), '(constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.\n ACCESS_STATE_QUEUED_TO_DENY)\n', (1625, 1709), False, 'import ddt\n'), ((1190, 1250), 'manila.share.snapshot_access.ShareSnapshotInstanceAccess', 'snapshot_access.ShareSnapshotInstanceAccess', (['db', 'self.driver'], {}), '(db, self.driver)\n', (1233, 1250), False, 'from manila.share import snapshot_access\n'), ((1287, 1314), 'manila.context.get_admin_context', 'context.get_admin_context', ([], {}), '()\n', (1312, 1314), False, 'from manila import context\n'), ((1331, 1354), 'manila.tests.db_utils.create_share', 'db_utils.create_share', ([], {}), '()\n', (1352, 1354), False, 'from manila.tests import db_utils\n'), ((1379, 1425), 'manila.tests.db_utils.create_snapshot', 'db_utils.create_snapshot', ([], {'share_id': "share['id']"}), "(share_id=share['id'])\n", (1403, 1425), False, 'from manila.tests import db_utils\n'), ((1459, 1589), 'manila.tests.db_utils.create_snapshot_instance', 'db_utils.create_snapshot_instance', ([], {'snapshot_id': "self.snapshot['id']", 'share_instance_id': "self.snapshot['share']['instance']['id']"}), "(snapshot_id=self.snapshot['id'],\n share_instance_id=self.snapshot['share']['instance']['id'])\n", (1492, 1589), False, 'from manila.tests import db_utils\n'), ((1987, 2007), 'copy.deepcopy', 'copy.deepcopy', (['rules'], {}), '(rules)\n', (2000, 2007), False, 'import copy\n'), ((1146, 1157), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1155, 1157), False, 'import mock\n'), ((2269, 2315), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'self.snapshot_instance'}), '(return_value=self.snapshot_instance)\n', (2278, 2315), False, 'import mock\n'), ((2460, 2493), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'all_rules'}), '(return_value=all_rules)\n', (2469, 2493), False, 'import mock\n'), ((2726, 2755), 'mock.Mock', 'mock.Mock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (2735, 2755), False, 'import mock\n'), ((3038, 3078), 'manila.utils.IsAMatcher', 'utils.IsAMatcher', (['context.RequestContext'], {}), '(context.RequestContext)\n', (3054, 3078), False, 'from manila import utils\n'), ((3220, 3260), 'manila.utils.IsAMatcher', 'utils.IsAMatcher', (['context.RequestContext'], {}), '(context.RequestContext)\n', (3236, 3260), False, 'from manila import utils\n'), ((4268, 4314), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'self.snapshot_instance'}), '(return_value=self.snapshot_instance)\n', (4277, 4314), False, 'import mock\n'), ((4459, 4493), 'mock.Mock', 'mock.Mock', ([], {'side_effect': '[rules, []]'}), '(side_effect=[rules, []])\n', (4468, 4493), False, 'import mock\n'), ((4983, 5023), 'manila.utils.IsAMatcher', 'utils.IsAMatcher', (['context.RequestContext'], {}), '(context.RequestContext)\n', (4999, 5023), False, 'from manila import utils\n'), ((5160, 5200), 'manila.utils.IsAMatcher', 'utils.IsAMatcher', (['context.RequestContext'], {}), '(context.RequestContext)\n', (5176, 5200), False, 'from manila import utils\n'), ((5319, 5359), 'manila.utils.IsAMatcher', 'utils.IsAMatcher', (['context.RequestContext'], {}), '(context.RequestContext)\n', (5335, 5359), False, 'from manila import utils\n'), ((5842, 5888), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'self.snapshot_instance'}), '(return_value=self.snapshot_instance)\n', (5851, 5888), False, 'import mock\n'), ((6033, 6062), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'rules'}), '(return_value=rules)\n', 
(6042, 6062), False, 'import mock\n'), ((6224, 6265), 'mock.Mock', 'mock.Mock', ([], {'side_effect': 'exception.NotFound'}), '(side_effect=exception.NotFound)\n', (6233, 6265), False, 'import mock\n'), ((6520, 6560), 'manila.utils.IsAMatcher', 'utils.IsAMatcher', (['context.RequestContext'], {}), '(context.RequestContext)\n', (6536, 6560), False, 'from manila import utils\n'), ((6702, 6742), 'manila.utils.IsAMatcher', 'utils.IsAMatcher', (['context.RequestContext'], {}), '(context.RequestContext)\n', (6718, 6742), False, 'from manila import utils\n'), ((6867, 6907), 'manila.utils.IsAMatcher', 'utils.IsAMatcher', (['context.RequestContext'], {}), '(context.RequestContext)\n', (6883, 6907), False, 'from manila import utils\n'), ((3452, 3492), 'manila.utils.IsAMatcher', 'utils.IsAMatcher', (['context.RequestContext'], {}), '(context.RequestContext)\n', (3468, 3492), False, 'from manila import utils\n'), ((3712, 3752), 'manila.utils.IsAMatcher', 'utils.IsAMatcher', (['context.RequestContext'], {}), '(context.RequestContext)\n', (3728, 3752), False, 'from manila import utils\n')] |
# This sample tests the type checker's reportUnnecessaryCast feature.
from typing import cast, Union
def foo(a: int):
# This should generate an error if
# reportUnnecessaryCast is enabled.
b = cast(int, a)
c: Union[int, str] = "hello"
d = cast(int, c)
| [
"typing.cast"
]
| [((256, 268), 'typing.cast', 'cast', (['int', 'c'], {}), '(int, c)\n', (260, 268), False, 'from typing import cast, Union\n'), ((208, 220), 'typing.cast', 'cast', (['int', 'a'], {}), '(int, a)\n', (212, 220), False, 'from typing import cast, Union\n')] |
from http import HTTPStatus
from typing import Iterable, Union, Mapping
from flask import request
from flask_restful import Resource, fields, marshal
from metadata_service.proxy import get_proxy_client
popular_table_fields = {
'database': fields.String,
'cluster': fields.String,
'schema': fields.String,
'table_name': fields.String(attribute='name'),
'table_description': fields.String(attribute='description'), # Optional
}
popular_tables_fields = {
'popular_tables': fields.List(fields.Nested(popular_table_fields))
}
class PopularTablesAPI(Resource):
"""
PopularTables API
"""
def __init__(self) -> None:
self.client = get_proxy_client()
def get(self) -> Iterable[Union[Mapping, int, None]]:
limit = request.args.get('limit', 10)
popular_tables = self.client.get_popular_tables(num_entries=limit)
return marshal({'popular_tables': popular_tables}, popular_tables_fields), HTTPStatus.OK
| [
"flask.request.args.get",
"flask_restful.fields.String",
"flask_restful.fields.Nested",
"flask_restful.marshal",
"metadata_service.proxy.get_proxy_client"
]
| [((338, 369), 'flask_restful.fields.String', 'fields.String', ([], {'attribute': '"""name"""'}), "(attribute='name')\n", (351, 369), False, 'from flask_restful import Resource, fields, marshal\n'), ((396, 434), 'flask_restful.fields.String', 'fields.String', ([], {'attribute': '"""description"""'}), "(attribute='description')\n", (409, 434), False, 'from flask_restful import Resource, fields, marshal\n'), ((511, 546), 'flask_restful.fields.Nested', 'fields.Nested', (['popular_table_fields'], {}), '(popular_table_fields)\n', (524, 546), False, 'from flask_restful import Resource, fields, marshal\n'), ((678, 696), 'metadata_service.proxy.get_proxy_client', 'get_proxy_client', ([], {}), '()\n', (694, 696), False, 'from metadata_service.proxy import get_proxy_client\n'), ((772, 801), 'flask.request.args.get', 'request.args.get', (['"""limit"""', '(10)'], {}), "('limit', 10)\n", (788, 801), False, 'from flask import request\n'), ((892, 958), 'flask_restful.marshal', 'marshal', (["{'popular_tables': popular_tables}", 'popular_tables_fields'], {}), "({'popular_tables': popular_tables}, popular_tables_fields)\n", (899, 958), False, 'from flask_restful import Resource, fields, marshal\n')] |
import requests
import logging
import cfscrape
import os
from manhwaDownloader.constants import CONSTANTS as CONST
logging.basicConfig(level=logging.DEBUG)
folderPath = os.path.join(CONST.OUTPUTPATH, 'serious-taste-of-forbbiden-fruit')
logging.info(len([file for file in os.walk(folderPath)]))
walkList = [file for file in os.walk(folderPath)]
chapterDicts = dict()
for folder, _, files in walkList[1:]:
chapterDicts.update({folder: files})
print(chapterDicts) | [
"logging.basicConfig",
"os.path.join",
"os.walk"
]
| [((116, 156), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (135, 156), False, 'import logging\n'), ((171, 237), 'os.path.join', 'os.path.join', (['CONST.OUTPUTPATH', '"""serious-taste-of-forbbiden-fruit"""'], {}), "(CONST.OUTPUTPATH, 'serious-taste-of-forbbiden-fruit')\n", (183, 237), False, 'import os\n'), ((326, 345), 'os.walk', 'os.walk', (['folderPath'], {}), '(folderPath)\n', (333, 345), False, 'import os\n'), ((274, 293), 'os.walk', 'os.walk', (['folderPath'], {}), '(folderPath)\n', (281, 293), False, 'import os\n')] |
# define custom R2 metrics for Keras backend
from keras import backend as K
def r2_keras(y_true, y_pred):
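    # Coefficient of determination R^2 = 1 - SS_res / SS_tot; K.epsilon() guards against division by zero.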
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - SS_res / (SS_tot + K.epsilon())
# base model architecture definition
def model():
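    # Fully-connected regression net: BatchNorm + Dropout after each hidden layer,
    # linear output, MSE loss and the custom R^2 metric defined above.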
model = Sequential()
#input layer
model.add(Dense(input_dims, input_dim=input_dims))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
# hidden layers
model.add(Dense(input_dims))
model.add(BatchNormalization())
model.add(Activation(act_func))
model.add(Dropout(0.3))
model.add(Dense(input_dims//2))
model.add(BatchNormalization())
model.add(Activation(act_func))
model.add(Dropout(0.3))
model.add(Dense(input_dims//4, activation=act_func))
# output layer (y_pred)
model.add(Dense(1, activation='linear'))
# compile this model
    model.compile(loss='mean_squared_error', # one may use 'mean_absolute_error' as an alternative
optimizer='adam',
metrics=[r2_keras] # you can add several if needed
)
# Visualize NN architecture
print(model.summary())
return model
################K2
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import RobustScaler
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, InputLayer, GaussianNoise
from keras.wrappers.scikit_learn import KerasRegressor
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
#
# Data preparation
#
y_train = train['y'].values
id_test = test['ID']
num_train = len(train)
df_all = pd.concat([train, test])
df_all.drop(['ID', 'y'], axis=1, inplace=True)
# One-hot encoding of categorical/strings
df_all = pd.get_dummies(df_all, drop_first=True)
# Sscaling features
scaler = RobustScaler()
df_all = scaler.fit_transform(df_all)
train = df_all[:num_train]
test = df_all[num_train:]
# Keep only the most contributing features
sfm = SelectFromModel(LassoCV())
sfm.fit(train, y_train)
train = sfm.transform(train)
test = sfm.transform(test)
print ('Number of features : %d' % train.shape[1])
def r2_keras(y_true, y_pred):
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - SS_res / (SS_tot + K.epsilon())
def build_model_fn(neurons=20, noise=0.25):
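    # Small Keras regressor (Gaussian input noise + one tanh hidden layer) wrapped
    # by KerasRegressor for the grid search below.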
model = Sequential()
model.add(InputLayer(input_shape=(train.shape[1],)))
model.add(GaussianNoise(noise))
model.add(Dense(neurons, activation='tanh'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mean_squared_error', optimizer='nadam', metrics=[r2_keras])
return model
#
# Tuning model parameters
#
model = KerasRegressor(build_fn=build_model_fn, epochs=75, verbose=0)
gsc = GridSearchCV(
estimator=model,
param_grid={
#'neurons': range(18,31,4),
'noise': [x/20.0 for x in range(3, 7)],
},
#scoring='r2',
scoring='neg_mean_squared_error',
cv=5
)
grid_result = gsc.fit(train, y_train)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
for test_mean, test_stdev, train_mean, train_stdev, param in zip(
grid_result.cv_results_['mean_test_score'],
grid_result.cv_results_['std_test_score'],
grid_result.cv_results_['mean_train_score'],
grid_result.cv_results_['std_train_score'],
grid_result.cv_results_['params']):
print("Train: %f (%f) // Test : %f (%f) with: %r" % (train_mean, train_stdev, test_mean, test_stdev, param))
#
# Train model with best params for submission
#
model = build_model_fn(**grid_result.best_params_)
model.fit(train, y_train, epochs=75, verbose=2)
y_test = model.predict(test).flatten()
df_sub = pd.DataFrame({'ID': id_test, 'y': y_test})
df_sub.to_csv('mercedes-submission.csv', index=False)
#########################
import pandas as pd
import numpy as np
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.decomposition import PCA, FastICA
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import make_pipeline, Pipeline, _name_estimators
from sklearn.linear_model import ElasticNet, ElasticNetCV
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import r2_score
from sklearn.base import BaseEstimator, TransformerMixin
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train['y'].values
y_mean = np.mean(y_train)
id_test = test['ID']
num_train = len(train)
df_all = pd.concat([train, test])
df_all.drop(['ID', 'y'], axis=1, inplace=True)
# One-hot encoding of categorical/strings
df_all = pd.get_dummies(df_all, drop_first=True)
train = df_all[:num_train]
test = df_all[num_train:]
class AddColumns(BaseEstimator, TransformerMixin):
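    # Appends the wrapped transform's output (e.g. PCA/ICA components) to the original feature matrix.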
def __init__(self, transform_=None):
self.transform_ = transform_
def fit(self, X, y=None):
self.transform_.fit(X, y)
return self
def transform(self, X, y=None):
xform_data = self.transform_.transform(X, y)
return np.append(X, xform_data, axis=1)
class LogExpPipeline(Pipeline):
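    # Pipeline that fits on log1p(y) and maps predictions back through expm1.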
def fit(self, X, y):
super(LogExpPipeline, self).fit(X, np.log1p(y))
def predict(self, X):
return np.expm1(super(LogExpPipeline, self).predict(X))
#
# Model/pipeline with scaling,pca,svm
#
svm_pipe = LogExpPipeline(_name_estimators([RobustScaler(),
PCA(),
SVR(kernel='rbf', C=1.0, epsilon=0.05)]))
# results = cross_val_score(svm_pipe, train, y_train, cv=5, scoring='r2')
# print("SVM score: %.4f (%.4f)" % (results.mean(), results.std()))
# exit()
#
# Model/pipeline with scaling,pca,ElasticNet
#
en_pipe = LogExpPipeline(_name_estimators([RobustScaler(),
PCA(n_components=125),
ElasticNet(alpha=0.001, l1_ratio=0.1)]))
#
# XGBoost model
#
xgb_model = xgb.sklearn.XGBRegressor(max_depth=4, learning_rate=0.005, subsample=0.921,
objective='reg:linear', n_estimators=1300, base_score=y_mean)
xgb_pipe = Pipeline(_name_estimators([AddColumns(transform_=PCA(n_components=10)),
AddColumns(transform_=FastICA(n_components=10, max_iter=500)),
xgb_model]))
# results = cross_val_score(xgb_model, train, y_train, cv=5, scoring='r2')
# print("XGB score: %.4f (%.4f)" % (results.mean(), results.std()))
#
# Random Forest
#
rf_model = RandomForestRegressor(n_estimators=250, n_jobs=4, min_samples_split=25,
min_samples_leaf=25, max_depth=3)
# results = cross_val_score(rf_model, train, y_train, cv=5, scoring='r2')
# print("RF score: %.4f (%.4f)" % (results.mean(), results.std()))
#
# Now the training and stacking part. In a previous version I just tried to train each model and
# find the best combination, which led to a horrible score (overfitting?). The code below does out-of-fold
# training/predictions and then combines the final results.
#
# Read here for more explanation (This code was borrowed/adapted) :
#
class Ensemble(object):
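    # Stacking ensemble: out-of-fold predictions of the base models become the
    # meta-features on which the stacker is trained.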
def __init__(self, n_splits, stacker, base_models):
self.n_splits = n_splits
self.stacker = stacker
self.base_models = base_models
def fit_predict(self, X, y, T):
X = np.array(X)
y = np.array(y)
T = np.array(T)
folds = list(KFold(n_splits=self.n_splits, shuffle=True, random_state=2016).split(X, y))
S_train = np.zeros((X.shape[0], len(self.base_models)))
S_test = np.zeros((T.shape[0], len(self.base_models)))
for i, clf in enumerate(self.base_models):
S_test_i = np.zeros((T.shape[0], self.n_splits))
for j, (train_idx, test_idx) in enumerate(folds):
X_train = X[train_idx]
y_train = y[train_idx]
X_holdout = X[test_idx]
y_holdout = y[test_idx]
clf.fit(X_train, y_train)
y_pred = clf.predict(X_holdout)[:]
print ("Model %d fold %d score %f" % (i, j, r2_score(y_holdout, y_pred)))
S_train[test_idx, i] = y_pred
S_test_i[:, j] = clf.predict(T)[:]
S_test[:, i] = S_test_i.mean(axis=1)
# results = cross_val_score(self.stacker, S_train, y, cv=5, scoring='r2')
# print("Stacker score: %.4f (%.4f)" % (results.mean(), results.std()))
# exit()
self.stacker.fit(S_train, y)
res = self.stacker.predict(S_test)[:]
return res
stack = Ensemble(n_splits=5,
#stacker=ElasticNetCV(l1_ratio=[x/10.0 for x in range(1,10)]),
stacker=ElasticNet(l1_ratio=0.1, alpha=1.4),
base_models=(svm_pipe, en_pipe, xgb_pipe, rf_model))
y_test = stack.fit_predict(train, y_train, test)
df_sub = pd.DataFrame({'ID': id_test, 'y': y_test})
df_sub.to_csv('submission.csv', index=False)
#############################
'''This example demonstrates the use of Convolution1D for text classification.
Gets to 0.89 test accuracy after 2 epochs.
90s/epoch on Intel i5 2.4Ghz CPU.
10s/epoch on Tesla K40 GPU.
'''
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
# set parameters:
max_features = 5000
maxlen = 400
batch_size = 32
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
epochs = 2
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
model.add(Dropout(0.2))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
| [
"pandas.read_csv",
"keras.layers.GlobalMaxPooling1D",
"numpy.array",
"keras.layers.Activation",
"keras.layers.Dense",
"keras.preprocessing.sequence.pad_sequences",
"sklearn.decomposition.FastICA",
"sklearn.model_selection.KFold",
"xgboost.sklearn.XGBRegressor",
"sklearn.metrics.r2_score",
"numpy.mean",
"sklearn.ensemble.RandomForestRegressor",
"keras.datasets.imdb.load_data",
"sklearn.decomposition.PCA",
"keras.wrappers.scikit_learn.KerasRegressor",
"keras.backend.square",
"pandas.DataFrame",
"keras.backend.epsilon",
"keras.layers.InputLayer",
"keras.layers.GaussianNoise",
"sklearn.svm.SVR",
"sklearn.linear_model.ElasticNet",
"sklearn.linear_model.LassoCV",
"keras.models.Sequential",
"pandas.get_dummies",
"numpy.log1p",
"keras.layers.Dropout",
"keras.backend.mean",
"numpy.append",
"numpy.zeros",
"sklearn.preprocessing.RobustScaler",
"keras.layers.Embedding",
"pandas.concat",
"keras.layers.Conv1D"
]
| [((1722, 1755), 'pandas.read_csv', 'pd.read_csv', (['"""../input/train.csv"""'], {}), "('../input/train.csv')\n", (1733, 1755), True, 'import pandas as pd\n'), ((1763, 1795), 'pandas.read_csv', 'pd.read_csv', (['"""../input/test.csv"""'], {}), "('../input/test.csv')\n", (1774, 1795), True, 'import pandas as pd\n'), ((1903, 1927), 'pandas.concat', 'pd.concat', (['[train, test]'], {}), '([train, test])\n', (1912, 1927), True, 'import pandas as pd\n'), ((2027, 2066), 'pandas.get_dummies', 'pd.get_dummies', (['df_all'], {'drop_first': '(True)'}), '(df_all, drop_first=True)\n', (2041, 2066), True, 'import pandas as pd\n'), ((2097, 2111), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (2109, 2111), False, 'from sklearn.preprocessing import RobustScaler\n'), ((3005, 3066), 'keras.wrappers.scikit_learn.KerasRegressor', 'KerasRegressor', ([], {'build_fn': 'build_model_fn', 'epochs': '(75)', 'verbose': '(0)'}), '(build_fn=build_model_fn, epochs=75, verbose=0)\n', (3019, 3066), False, 'from keras.wrappers.scikit_learn import KerasRegressor\n'), ((4047, 4089), 'pandas.DataFrame', 'pd.DataFrame', (["{'ID': id_test, 'y': y_test}"], {}), "({'ID': id_test, 'y': y_test})\n", (4059, 4089), True, 'import pandas as pd\n'), ((4718, 4751), 'pandas.read_csv', 'pd.read_csv', (['"""../input/train.csv"""'], {}), "('../input/train.csv')\n", (4729, 4751), True, 'import pandas as pd\n'), ((4759, 4791), 'pandas.read_csv', 'pd.read_csv', (['"""../input/test.csv"""'], {}), "('../input/test.csv')\n", (4770, 4791), True, 'import pandas as pd\n'), ((4830, 4846), 'numpy.mean', 'np.mean', (['y_train'], {}), '(y_train)\n', (4837, 4846), True, 'import numpy as np\n'), ((4901, 4925), 'pandas.concat', 'pd.concat', (['[train, test]'], {}), '([train, test])\n', (4910, 4925), True, 'import pandas as pd\n'), ((5025, 5064), 'pandas.get_dummies', 'pd.get_dummies', (['df_all'], {'drop_first': '(True)'}), '(df_all, drop_first=True)\n', (5039, 5064), True, 'import pandas as pd\n'), ((6451, 6592), 'xgboost.sklearn.XGBRegressor', 'xgb.sklearn.XGBRegressor', ([], {'max_depth': '(4)', 'learning_rate': '(0.005)', 'subsample': '(0.921)', 'objective': '"""reg:linear"""', 'n_estimators': '(1300)', 'base_score': 'y_mean'}), "(max_depth=4, learning_rate=0.005, subsample=0.921,\n objective='reg:linear', n_estimators=1300, base_score=y_mean)\n", (6475, 6592), True, 'import xgboost as xgb\n'), ((7076, 7185), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(250)', 'n_jobs': '(4)', 'min_samples_split': '(25)', 'min_samples_leaf': '(25)', 'max_depth': '(3)'}), '(n_estimators=250, n_jobs=4, min_samples_split=25,\n min_samples_leaf=25, max_depth=3)\n', (7097, 7185), False, 'from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor\n'), ((9461, 9503), 'pandas.DataFrame', 'pd.DataFrame', (["{'ID': id_test, 'y': y_test}"], {}), "({'ID': id_test, 'y': y_test})\n", (9473, 9503), True, 'import pandas as pd\n'), ((10268, 10306), 'keras.datasets.imdb.load_data', 'imdb.load_data', ([], {'num_words': 'max_features'}), '(num_words=max_features)\n', (10282, 10306), False, 'from keras.datasets import imdb\n'), ((10434, 10480), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['x_train'], {'maxlen': 'maxlen'}), '(x_train, maxlen=maxlen)\n', (10456, 10480), False, 'from keras.preprocessing import sequence\n'), ((10490, 10535), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['x_test'], {'maxlen': 'maxlen'}), '(x_test, maxlen=maxlen)\n', 
(10512, 10535), False, 'from keras.preprocessing import sequence\n'), ((10645, 10657), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (10655, 10657), False, 'from keras.models import Sequential\n'), ((331, 343), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (341, 343), False, 'from keras.models import Sequential\n'), ((2270, 2279), 'sklearn.linear_model.LassoCV', 'LassoCV', ([], {}), '()\n', (2277, 2279), False, 'from sklearn.linear_model import LassoCV\n'), ((2656, 2668), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2666, 2668), False, 'from keras.models import Sequential\n'), ((10780, 10840), 'keras.layers.Embedding', 'Embedding', (['max_features', 'embedding_dims'], {'input_length': 'maxlen'}), '(max_features, embedding_dims, input_length=maxlen)\n', (10789, 10840), False, 'from keras.layers import Embedding\n'), ((10892, 10904), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (10899, 10904), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((11012, 11087), 'keras.layers.Conv1D', 'Conv1D', (['filters', 'kernel_size'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'strides': '(1)'}), "(filters, kernel_size, padding='valid', activation='relu', strides=1)\n", (11018, 11087), False, 'from keras.layers import Conv1D, GlobalMaxPooling1D\n'), ((11189, 11209), 'keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (11207, 11209), False, 'from keras.layers import Conv1D, GlobalMaxPooling1D\n'), ((11255, 11273), 'keras.layers.Dense', 'Dense', (['hidden_dims'], {}), '(hidden_dims)\n', (11260, 11273), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((11285, 11297), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (11292, 11297), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((11309, 11327), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (11319, 11327), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((11416, 11424), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (11421, 11424), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((11436, 11457), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (11446, 11457), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((127, 152), 'keras.backend.square', 'K.square', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (135, 152), True, 'from keras import backend as K\n'), ((375, 414), 'keras.layers.Dense', 'Dense', (['input_dims'], {'input_dim': 'input_dims'}), '(input_dims, input_dim=input_dims)\n', (380, 414), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((466, 484), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (476, 484), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((500, 512), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (507, 512), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((548, 565), 'keras.layers.Dense', 'Dense', (['input_dims'], {}), '(input_dims)\n', (553, 565), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((617, 637), 'keras.layers.Activation', 'Activation', (['act_func'], {}), '(act_func)\n', (627, 637), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((653, 665), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (660, 665), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((686, 708), 'keras.layers.Dense', 
'Dense', (['(input_dims // 2)'], {}), '(input_dims // 2)\n', (691, 708), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((758, 778), 'keras.layers.Activation', 'Activation', (['act_func'], {}), '(act_func)\n', (768, 778), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((794, 806), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (801, 806), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((827, 870), 'keras.layers.Dense', 'Dense', (['(input_dims // 4)'], {'activation': 'act_func'}), '(input_dims // 4, activation=act_func)\n', (832, 870), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((917, 946), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (922, 946), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2464, 2489), 'keras.backend.square', 'K.square', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (2472, 2489), True, 'from keras import backend as K\n'), ((2683, 2724), 'keras.layers.InputLayer', 'InputLayer', ([], {'input_shape': '(train.shape[1],)'}), '(input_shape=(train.shape[1],))\n', (2693, 2724), False, 'from keras.layers import Dense, InputLayer, GaussianNoise\n'), ((2740, 2760), 'keras.layers.GaussianNoise', 'GaussianNoise', (['noise'], {}), '(noise)\n', (2753, 2760), False, 'from keras.layers import Dense, InputLayer, GaussianNoise\n'), ((2776, 2809), 'keras.layers.Dense', 'Dense', (['neurons'], {'activation': '"""tanh"""'}), "(neurons, activation='tanh')\n", (2781, 2809), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2825, 2854), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (2830, 2854), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((5440, 5472), 'numpy.append', 'np.append', (['X', 'xform_data'], {'axis': '(1)'}), '(X, xform_data, axis=1)\n', (5449, 5472), True, 'import numpy as np\n'), ((7924, 7935), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (7932, 7935), True, 'import numpy as np\n'), ((7948, 7959), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7956, 7959), True, 'import numpy as np\n'), ((7972, 7983), 'numpy.array', 'np.array', (['T'], {}), '(T)\n', (7980, 7983), True, 'import numpy as np\n'), ((9294, 9329), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {'l1_ratio': '(0.1)', 'alpha': '(1.4)'}), '(l1_ratio=0.1, alpha=1.4)\n', (9304, 9329), False, 'from sklearn.linear_model import ElasticNet, ElasticNetCV\n'), ((5575, 5586), 'numpy.log1p', 'np.log1p', (['y'], {}), '(y)\n', (5583, 5586), True, 'import numpy as np\n'), ((5766, 5780), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (5778, 5780), False, 'from sklearn.preprocessing import RobustScaler\n'), ((5826, 5831), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (5829, 5831), False, 'from sklearn.decomposition import PCA, FastICA\n'), ((5877, 5915), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'C': '(1.0)', 'epsilon': '(0.05)'}), "(kernel='rbf', C=1.0, epsilon=0.05)\n", (5880, 5915), False, 'from sklearn.svm import SVR\n'), ((6252, 6266), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (6264, 6266), False, 'from sklearn.preprocessing import RobustScaler\n'), ((6311, 6332), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(125)'}), '(n_components=125)\n', (6314, 6332), False, 'from sklearn.decomposition import PCA, FastICA\n'), ((6377, 6414), 'sklearn.linear_model.ElasticNet', 
'ElasticNet', ([], {'alpha': '(0.001)', 'l1_ratio': '(0.1)'}), '(alpha=0.001, l1_ratio=0.1)\n', (6387, 6414), False, 'from sklearn.linear_model import ElasticNet, ElasticNetCV\n'), ((8285, 8322), 'numpy.zeros', 'np.zeros', (['(T.shape[0], self.n_splits)'], {}), '((T.shape[0], self.n_splits))\n', (8293, 8322), True, 'import numpy as np\n'), ((195, 209), 'keras.backend.mean', 'K.mean', (['y_true'], {}), '(y_true)\n', (201, 209), True, 'from keras import backend as K\n'), ((249, 260), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (258, 260), True, 'from keras import backend as K\n'), ((2531, 2545), 'keras.backend.mean', 'K.mean', (['y_true'], {}), '(y_true)\n', (2537, 2545), True, 'from keras import backend as K\n'), ((2584, 2595), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (2593, 2595), True, 'from keras import backend as K\n'), ((6724, 6744), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(10)'}), '(n_components=10)\n', (6727, 6744), False, 'from sklearn.decomposition import PCA, FastICA\n'), ((6807, 6845), 'sklearn.decomposition.FastICA', 'FastICA', ([], {'n_components': '(10)', 'max_iter': '(500)'}), '(n_components=10, max_iter=500)\n', (6814, 6845), False, 'from sklearn.decomposition import PCA, FastICA\n'), ((8006, 8068), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'self.n_splits', 'shuffle': '(True)', 'random_state': '(2016)'}), '(n_splits=self.n_splits, shuffle=True, random_state=2016)\n', (8011, 8068), False, 'from sklearn.model_selection import cross_val_score, KFold\n'), ((8699, 8726), 'sklearn.metrics.r2_score', 'r2_score', (['y_holdout', 'y_pred'], {}), '(y_holdout, y_pred)\n', (8707, 8726), False, 'from sklearn.metrics import r2_score\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
class Net(Cell):
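    # Gather -> Mul -> Reshape -> MatMul network used to exercise manual splitting
    # of the Gather parameter (the "manual_split" primitive attribute).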
def __init__(self,
strategy1=None,
strategy2=None,
strategy3=None,
axis=0,
init_flag=True,
split_tuple=(4, 4),
split_string="manual_split",
param_shape=(8, 8)):
super().__init__()
self.gatherv2 = P.Gather().shard(strategy1)
self.gatherv2.add_prim_attr(split_string, split_tuple)
self.mul = P.Mul().shard(strategy2)
self.reshape = P.Reshape()
self.matmul = P.MatMul().shard(strategy3)
self.matmul.add_prim_attr("forward_reduce_scatter", True)
if init_flag:
self.param = Parameter(initializer("ones", param_shape, ms.float32), name="gatherv2_param")
else:
self.param = Parameter(Tensor(np.ones(param_shape), dtype=ms.float32), name="gatherv2_param")
self.mul_weight = Parameter(initializer("ones", (8, 8, 8), ms.float32), name="mul_weight")
self.matmul_weight = Parameter(initializer("ones", (64, 16), ms.float32), name="matmul_weight")
self.axis = axis
def construct(self, x, b):
out = self.gatherv2(self.param, x, self.axis)
out = self.mul(out, self.mul_weight)
out = self.reshape(out, (8, 64))
out = self.matmul(out, self.matmul_weight)
return out
_x = Tensor(np.ones([8, 8]), dtype=ms.int32)
_b = Tensor(np.ones([64, 8]), dtype=ms.float32)
def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_cell_graph_executor.compile(train_net, _x, _b, auto_parallel_mode=True)
context.reset_auto_parallel_context()
def test_normal_split():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
compile_net(net)
def test_normal_split2():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
strategy1 = ((4, 1), (1, 4))
strategy2 = ((1, 4, 1), (1, 4, 1))
strategy3 = ((1, 4), (4, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
compile_net(net)
def test_normal_split3():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=17)
strategy1 = ((4, 8), (1, 4))
strategy2 = ((1, 4, 8), (1, 4, 8))
strategy3 = ((1, 32), (32, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
compile_net(net)
def test_normal_split_with_offset():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, split_string="manual_split_with_offset", split_tuple=((4, 0), (4, 4)))
compile_net(net)
def test_auto_parallel_error():
context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=2, global_rank=0)
net = Net()
with pytest.raises(RuntimeError):
compile_net(net)
def test_axis_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, axis=1)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((4, 1), (8, 1))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error2():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((4, 1), (1, 8))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error3():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error4():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 8), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error5():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
strategy1 = ((4, 1), (1, 4))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_split_tuple_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=((5, 0), (5, 5)))
with pytest.raises(RuntimeError):
compile_net(net)
def test_parameter_use_tensor_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, init_flag=False)
with pytest.raises(RuntimeError):
compile_net(net)
| [
"mindspore.common.api._cell_graph_executor.compile",
"numpy.ones",
"mindspore.ops.operations.Mul",
"mindspore.nn.TrainOneStepCell",
"mindspore.ops.operations.MatMul",
"mindspore.ops.operations.Reshape",
"mindspore.context.reset_auto_parallel_context",
"pytest.raises",
"mindspore.common.initializer.initializer",
"mindspore.context.set_auto_parallel_context",
"mindspore.ops.operations.Gather"
]
| [((2366, 2381), 'numpy.ones', 'np.ones', (['[8, 8]'], {}), '([8, 8])\n', (2373, 2381), True, 'import numpy as np\n'), ((2411, 2427), 'numpy.ones', 'np.ones', (['[64, 8]'], {}), '([64, 8])\n', (2418, 2427), True, 'import numpy as np\n'), ((2569, 2601), 'mindspore.nn.TrainOneStepCell', 'TrainOneStepCell', (['net', 'optimizer'], {}), '(net, optimizer)\n', (2585, 2601), False, 'from mindspore.nn import Cell, TrainOneStepCell, Momentum\n'), ((2666, 2738), 'mindspore.common.api._cell_graph_executor.compile', '_cell_graph_executor.compile', (['train_net', '_x', '_b'], {'auto_parallel_mode': '(True)'}), '(train_net, _x, _b, auto_parallel_mode=True)\n', (2694, 2738), False, 'from mindspore.common.api import _cell_graph_executor\n'), ((2743, 2780), 'mindspore.context.reset_auto_parallel_context', 'context.reset_auto_parallel_context', ([], {}), '()\n', (2778, 2780), False, 'from mindspore import context, Tensor, Parameter\n'), ((2812, 2914), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(2)', 'global_rank': '(0)'}), "(parallel_mode='semi_auto_parallel',\n device_num=2, global_rank=0)\n", (2845, 2914), False, 'from mindspore import context, Tensor, Parameter\n'), ((3116, 3218), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(4)', 'global_rank': '(0)'}), "(parallel_mode='semi_auto_parallel',\n device_num=4, global_rank=0)\n", (3149, 3218), False, 'from mindspore import context, Tensor, Parameter\n'), ((3470, 3574), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(32)', 'global_rank': '(17)'}), "(parallel_mode='semi_auto_parallel',\n device_num=32, global_rank=17)\n", (3503, 3574), False, 'from mindspore import context, Tensor, Parameter\n'), ((3839, 3941), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(2)', 'global_rank': '(0)'}), "(parallel_mode='semi_auto_parallel',\n device_num=2, global_rank=0)\n", (3872, 3941), False, 'from mindspore import context, Tensor, Parameter\n'), ((4220, 4318), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""auto_parallel"""', 'device_num': '(2)', 'global_rank': '(0)'}), "(parallel_mode='auto_parallel', device_num\n =2, global_rank=0)\n", (4253, 4318), False, 'from mindspore import context, Tensor, Parameter\n'), ((4422, 4524), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(2)', 'global_rank': '(0)'}), "(parallel_mode='semi_auto_parallel',\n device_num=2, global_rank=0)\n", (4455, 4524), False, 'from mindspore import context, Tensor, Parameter\n'), ((4777, 4879), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(8)', 'global_rank': '(0)'}), "(parallel_mode='semi_auto_parallel',\n device_num=8, global_rank=0)\n", (4810, 4879), False, 'from mindspore import context, Tensor, Parameter\n'), ((5125, 5227), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(8)', 'global_rank': '(0)'}), 
"(parallel_mode='semi_auto_parallel',\n device_num=8, global_rank=0)\n", (5158, 5227), False, 'from mindspore import context, Tensor, Parameter\n'), ((5473, 5575), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(8)', 'global_rank': '(0)'}), "(parallel_mode='semi_auto_parallel',\n device_num=8, global_rank=0)\n", (5506, 5575), False, 'from mindspore import context, Tensor, Parameter\n'), ((5821, 5923), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(2)', 'global_rank': '(0)'}), "(parallel_mode='semi_auto_parallel',\n device_num=2, global_rank=0)\n", (5854, 5923), False, 'from mindspore import context, Tensor, Parameter\n'), ((6169, 6271), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(4)', 'global_rank': '(0)'}), "(parallel_mode='semi_auto_parallel',\n device_num=4, global_rank=0)\n", (6202, 6271), False, 'from mindspore import context, Tensor, Parameter\n'), ((6519, 6621), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(2)', 'global_rank': '(0)'}), "(parallel_mode='semi_auto_parallel',\n device_num=2, global_rank=0)\n", (6552, 6621), False, 'from mindspore import context, Tensor, Parameter\n'), ((6908, 7010), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(2)', 'global_rank': '(0)'}), "(parallel_mode='semi_auto_parallel',\n device_num=2, global_rank=0)\n", (6941, 7010), False, 'from mindspore import context, Tensor, Parameter\n'), ((1508, 1519), 'mindspore.ops.operations.Reshape', 'P.Reshape', ([], {}), '()\n', (1517, 1519), True, 'from mindspore.ops import operations as P\n'), ((4339, 4366), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (4352, 4366), False, 'import pytest\n'), ((4690, 4717), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (4703, 4717), False, 'import pytest\n'), ((5037, 5064), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (5050, 5064), False, 'import pytest\n'), ((5385, 5412), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (5398, 5412), False, 'import pytest\n'), ((5733, 5760), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (5746, 5760), False, 'import pytest\n'), ((6081, 6108), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (6094, 6108), False, 'import pytest\n'), ((6429, 6456), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (6442, 6456), False, 'import pytest\n'), ((6809, 6836), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (6822, 6836), False, 'import pytest\n'), ((7185, 7212), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (7198, 7212), False, 'import pytest\n'), ((1918, 1960), 'mindspore.common.initializer.initializer', 'initializer', (['"""ones"""', '(8, 8, 8)', 'ms.float32'], {}), "('ones', (8, 8, 8), ms.float32)\n", (1929, 1960), False, 'from mindspore.common.initializer import initializer\n'), ((2020, 2061), 'mindspore.common.initializer.initializer', 'initializer', (['"""ones"""', 
'(64, 16)', 'ms.float32'], {}), "('ones', (64, 16), ms.float32)\n", (2031, 2061), False, 'from mindspore.common.initializer import initializer\n'), ((1350, 1360), 'mindspore.ops.operations.Gather', 'P.Gather', ([], {}), '()\n', (1358, 1360), True, 'from mindspore.ops import operations as P\n'), ((1460, 1467), 'mindspore.ops.operations.Mul', 'P.Mul', ([], {}), '()\n', (1465, 1467), True, 'from mindspore.ops import operations as P\n'), ((1542, 1552), 'mindspore.ops.operations.MatMul', 'P.MatMul', ([], {}), '()\n', (1550, 1552), True, 'from mindspore.ops import operations as P\n'), ((1693, 1737), 'mindspore.common.initializer.initializer', 'initializer', (['"""ones"""', 'param_shape', 'ms.float32'], {}), "('ones', param_shape, ms.float32)\n", (1704, 1737), False, 'from mindspore.common.initializer import initializer\n'), ((1818, 1838), 'numpy.ones', 'np.ones', (['param_shape'], {}), '(param_shape)\n', (1825, 1838), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 10:59:00 2020
@author: user
"""
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
import time
import itertools
import ctypes
def formfactor(args):
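    # args = (index into the q grid, particle index); sums cos(q . r) over all pairs
    # involving this particle, averaged over the x, y and z directions.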
# with AL_dist_flat_glo.get_lock:
AL_dist_flat_glo_r = np.frombuffer(AL_dist_flat_glo.get_obj())
AL_dist_flat_glo_s = AL_dist_flat_glo_r.reshape((n_glo.value,m_glo.value))
# ffq = np.sum(np.cos(np.dot(np.logspace(-2,3,100)[args[0]]*np.array([1,0,0]),
# np.subtract(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1+args[1]:]).T)))
qr = np.logspace(-2,3,100)[args[0]]
rvec = np.subtract(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1+args[1]:]).T
cosx = np.cos(np.dot(qr*np.array([1,0,0]), rvec))
cosy = np.cos(np.dot(qr*np.array([0,1,0]), rvec))
cosz = np.cos(np.dot(qr*np.array([0,0,1]), rvec))
# cosxy = np.cos(np.dot(qr*np.array([0.707,0.707,0]), rvec))
# cosyz = np.cos(np.dot(qr*np.array([0,0.707,0.707]), rvec))
# cosxz = np.cos(np.dot(qr*np.array([0.707,0,0.707]), rvec))
# cosxyz = np.cos(np.dot(qr*np.array([0.577,0.577,0.577]), rvec))
ffq = np.sum(np.mean(np.array([cosx, cosy, cosz]), axis=0))
return ffq
def parallelinit(AL_dist_flat_glo_, n_glo_, m_glo_):
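    # Pool initializer: expose the shared-memory buffers as module globals in each worker process.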
global AL_dist_flat_glo, n_glo, m_glo
AL_dist_flat_glo = AL_dist_flat_glo_
n_glo = n_glo_
m_glo = m_glo_
if __name__ == '__main__':
AL_dist_flat = np.load(r'./AL_dist_flat.npy')
n = np.shape(AL_dist_flat)[0]
m = np.shape(AL_dist_flat)[1]
q_range = np.logspace(-2,3,100)
# r_x = np.array([1, 0, 0])
# q_range_glo = mp.Array(ctypes.c_double, q_range)
AL_dist_flat_glo = mp.Array(ctypes.c_double, AL_dist_flat.flatten())
n_glo = mp.Value(ctypes.c_int, n)
m_glo = mp.Value(ctypes.c_int, m)
# r_x_glo = mp.Array(ctypes.c_double, r_x)
paramlist = list(itertools.product(range(100), range(n)))
pool = mp.Pool(20, initializer=parallelinit, initargs=(AL_dist_flat_glo, n_glo, m_glo))
t1 = time.time()
results = pool.map(formfactor, paramlist)
pool.close()
t2 = time.time()
print(t2-t1)
np.save(r'./AL_results.npy', results)
Pq = 2*np.divide(np.sum(np.array(results).reshape(100, n), axis=1), n)
# fig = plt.figure(figsize=(8,6))
# plt.plot(q_range, Pq, lw=3, color='tab:orange')
# plt.xscale('log')
# plt.xlabel('$q$', fontsize=15)
# plt.ylabel('$P(q)$', fontsize=15)
# plt.tight_layout()
# plt.savefig(r'./AL_form_factor.pdf', dpi=300, bbox_inches='tight')
# plt.show()
fig = plt.figure(figsize=(8,6))
plt.plot(q_range, Pq, lw=3, color='tab:orange')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('$q$', fontsize=15)
plt.ylabel('$P(q)$', fontsize=15)
plt.tight_layout()
plt.savefig(r'./AL_form_factor_log.pdf', dpi=300, bbox_inches='tight')
plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"multiprocessing.Value",
"numpy.subtract",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.figure",
"numpy.array",
"multiprocessing.Pool",
"matplotlib.pyplot.tight_layout",
"numpy.shape",
"numpy.logspace",
"time.time",
"numpy.save",
"numpy.load"
]
| [((1464, 1493), 'numpy.load', 'np.load', (['"""./AL_dist_flat.npy"""'], {}), "('./AL_dist_flat.npy')\n", (1471, 1493), True, 'import numpy as np\n'), ((1582, 1605), 'numpy.logspace', 'np.logspace', (['(-2)', '(3)', '(100)'], {}), '(-2, 3, 100)\n', (1593, 1605), True, 'import numpy as np\n'), ((1782, 1807), 'multiprocessing.Value', 'mp.Value', (['ctypes.c_int', 'n'], {}), '(ctypes.c_int, n)\n', (1790, 1807), True, 'import multiprocessing as mp\n'), ((1820, 1845), 'multiprocessing.Value', 'mp.Value', (['ctypes.c_int', 'm'], {}), '(ctypes.c_int, m)\n', (1828, 1845), True, 'import multiprocessing as mp\n'), ((1976, 2061), 'multiprocessing.Pool', 'mp.Pool', (['(20)'], {'initializer': 'parallelinit', 'initargs': '(AL_dist_flat_glo, n_glo, m_glo)'}), '(20, initializer=parallelinit, initargs=(AL_dist_flat_glo, n_glo, m_glo)\n )\n', (1983, 2061), True, 'import multiprocessing as mp\n'), ((2071, 2082), 'time.time', 'time.time', ([], {}), '()\n', (2080, 2082), False, 'import time\n'), ((2165, 2176), 'time.time', 'time.time', ([], {}), '()\n', (2174, 2176), False, 'import time\n'), ((2208, 2244), 'numpy.save', 'np.save', (['"""./AL_results.npy"""', 'results'], {}), "('./AL_results.npy', results)\n", (2215, 2244), True, 'import numpy as np\n'), ((2654, 2680), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (2664, 2680), True, 'import matplotlib.pyplot as plt\n'), ((2684, 2731), 'matplotlib.pyplot.plot', 'plt.plot', (['q_range', 'Pq'], {'lw': '(3)', 'color': '"""tab:orange"""'}), "(q_range, Pq, lw=3, color='tab:orange')\n", (2692, 2731), True, 'import matplotlib.pyplot as plt\n'), ((2736, 2753), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2746, 2753), True, 'import matplotlib.pyplot as plt\n'), ((2758, 2775), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2768, 2775), True, 'import matplotlib.pyplot as plt\n'), ((2780, 2810), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$q$"""'], {'fontsize': '(15)'}), "('$q$', fontsize=15)\n", (2790, 2810), True, 'import matplotlib.pyplot as plt\n'), ((2815, 2848), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$P(q)$"""'], {'fontsize': '(15)'}), "('$P(q)$', fontsize=15)\n", (2825, 2848), True, 'import matplotlib.pyplot as plt\n'), ((2853, 2871), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2869, 2871), True, 'import matplotlib.pyplot as plt\n'), ((2876, 2945), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./AL_form_factor_log.pdf"""'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('./AL_form_factor_log.pdf', dpi=300, bbox_inches='tight')\n", (2887, 2945), True, 'import matplotlib.pyplot as plt\n'), ((2951, 2961), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2959, 2961), True, 'import matplotlib.pyplot as plt\n'), ((618, 641), 'numpy.logspace', 'np.logspace', (['(-2)', '(3)', '(100)'], {}), '(-2, 3, 100)\n', (629, 641), True, 'import numpy as np\n'), ((660, 734), 'numpy.subtract', 'np.subtract', (['AL_dist_flat_glo_s[args[1]]', 'AL_dist_flat_glo_s[1 + args[1]:]'], {}), '(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1 + args[1]:])\n', (671, 734), True, 'import numpy as np\n'), ((1508, 1530), 'numpy.shape', 'np.shape', (['AL_dist_flat'], {}), '(AL_dist_flat)\n', (1516, 1530), True, 'import numpy as np\n'), ((1542, 1564), 'numpy.shape', 'np.shape', (['AL_dist_flat'], {}), '(AL_dist_flat)\n', (1550, 1564), True, 'import numpy as np\n'), ((1187, 1215), 'numpy.array', 'np.array', (['[cosx, cosy, cosz]'], {}), 
'([cosx, cosy, cosz])\n', (1195, 1215), True, 'import numpy as np\n'), ((763, 782), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (771, 782), True, 'import numpy as np\n'), ((817, 836), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (825, 836), True, 'import numpy as np\n'), ((871, 890), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (879, 890), True, 'import numpy as np\n'), ((2279, 2296), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (2287, 2296), True, 'import numpy as np\n')] |
# coding: utf-8
import io
import cairo # pycairo
import cairocffi
from pycairo_to_cairocffi import _UNSAFE_pycairo_context_to_cairocffi
from cairocffi_to_pycairo import _UNSAFE_cairocffi_context_to_pycairo
import pango_example
def test():
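    # Round-trip a context between cairocffi and pycairo and check that the
    # scaling matrix (and the underlying pointer) survive the conversion.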
cairocffi_context = cairocffi.Context(cairocffi.PDFSurface(None, 10, 20))
cairocffi_context.scale(2, 3)
pycairo_context = _UNSAFE_cairocffi_context_to_pycairo(cairocffi_context)
cairocffi_context2 = _UNSAFE_pycairo_context_to_cairocffi(pycairo_context)
assert tuple(cairocffi_context.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert tuple(cairocffi_context2.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert tuple(pycairo_context.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert cairocffi_context2._pointer == cairocffi_context._pointer
file_obj = io.BytesIO()
# Mostly test that this runs without raising.
pango_example.write_example_pdf(file_obj)
assert file_obj.getvalue().startswith(b'%PDF')
if __name__ == '__main__':
test()
| [
"cairocffi.PDFSurface",
"pango_example.write_example_pdf",
"io.BytesIO",
"pycairo_to_cairocffi._UNSAFE_pycairo_context_to_cairocffi",
"cairocffi_to_pycairo._UNSAFE_cairocffi_context_to_pycairo"
]
| [((392, 447), 'cairocffi_to_pycairo._UNSAFE_cairocffi_context_to_pycairo', '_UNSAFE_cairocffi_context_to_pycairo', (['cairocffi_context'], {}), '(cairocffi_context)\n', (428, 447), False, 'from cairocffi_to_pycairo import _UNSAFE_cairocffi_context_to_pycairo\n'), ((474, 527), 'pycairo_to_cairocffi._UNSAFE_pycairo_context_to_cairocffi', '_UNSAFE_pycairo_context_to_cairocffi', (['pycairo_context'], {}), '(pycairo_context)\n', (510, 527), False, 'from pycairo_to_cairocffi import _UNSAFE_pycairo_context_to_cairocffi\n'), ((831, 843), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (841, 843), False, 'import io\n'), ((900, 941), 'pango_example.write_example_pdf', 'pango_example.write_example_pdf', (['file_obj'], {}), '(file_obj)\n', (931, 941), False, 'import pango_example\n'), ((298, 332), 'cairocffi.PDFSurface', 'cairocffi.PDFSurface', (['None', '(10)', '(20)'], {}), '(None, 10, 20)\n', (318, 332), False, 'import cairocffi\n')] |
import random
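# Monte Carlo estimate of the Monty Hall "always switch" strategy:
# goat1/goat2 are the two goat doors; a win is counted when the switched-to door avoids both.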
goat1 = random.randint(1, 3)
goat2 = random.randint(1, 3)
while goat1 == goat2:
goat2 = random.randint(1, 3)
success = 0
tries = 1_000_000
for _ in range(tries):
options = [1, 2, 3]
choice = random.randint(1, 3)
options.remove(choice)
if choice == goat1:
options.remove(goat2)
else:
options.remove(goat1)
choice = options[0]
if choice != goat1 and choice != goat2:
success = success + 1
print(success / tries)
| [
"random.randint"
]
| [((23, 43), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (37, 43), False, 'import random\n'), ((52, 72), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (66, 72), False, 'import random\n'), ((108, 128), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (122, 128), False, 'import random\n'), ((221, 241), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (235, 241), False, 'import random\n')] |
#!/usr/bin/env python3
import random
N = 32
M = 64
# NOTE: 0 is a reserved value
randu = lambda x: random.randint(1, 2**x-1)
randU32 = lambda: randu(32)
randU64 = lambda: randu(64)
fmt_by_dtype = {
'u32hex': '0x{:08x}',
'u64hex': '0x{:016x}',
}
cpp_by_dtype = {
'u32hex': 'uint32_t',
'u64hex': 'uint64_t',
}
# key = randU32()
# vals = [(key, randU32(), randU64()) for _ in range(N)]
# keys = [(x[0], x[1]) for x in vals]
# success = [random.choice(vals) for _ in range(M)]
# failure = []
keys = [(randU32(),) for _ in range(M)]
vals = [(randU32(), randU64()) for _ in range(N)]
def genval():
y = randU32()
while y in vals:
y = randU32()
return y
miss = [(genval(),) for _ in range(M)]
def print_vector(vals, name, dtypes, indent=0):
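    # Emit a C++ `const std::vector<...>` initializer for the given rows; multiple
    # dtypes are wrapped in std::tuple and each value is formatted with the matching hex width.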
indent = ' ' * indent
tabs = indent + ' '
cpptypes = [cpp_by_dtype[dt] for dt in dtypes]
if len(cpptypes) == 1:
cctype = cpptypes[0]
def fmtrow(vs): return vs
else:
cctype = f"std::tuple<{', '.join(cpptypes)}>"
def fmtrow(vs): return f"{{ {vs} }}"
fmts = [fmt_by_dtype[dt] for dt in dtypes]
print(f"{indent}const std::vector<{cctype}> {name} = {{")
rows = [
tabs + fmtrow(', '.join([fmt.format(v) for v, fmt in zip(vs, fmts)])) + ','
for vs in vals
]
print("\n".join(rows))
print(f"{indent}}};")
print('TEST_CASE("Insert random values and look them up", "[gentbl]")')
print('{')
print_vector(keys, name='keys', dtypes=['u32hex'], indent=4)
print()
print_vector(vals, name='vals', dtypes=['u32hex', 'u64hex'], indent=4)
print()
print_vector(miss, name='miss', dtypes=['u32hex'], indent=4)
print()
print('}')
# print("const std::vector<std::tuple<uint32_t, uint32_t, uint64_t>> vs = {")
# for _ in range(N):
# print(" {{ 0x{:08x}, 0x{:08x}, 0x{:016x} }},".format(
# randU32(), randU32(), randU64()))
# print("};")
| [
"random.randint"
]
| [((105, 134), 'random.randint', 'random.randint', (['(1)', '(2 ** x - 1)'], {}), '(1, 2 ** x - 1)\n', (119, 134), False, 'import random\n')] |
# ------------------------------------------------------------------
#
# RDF and CN related analysis
#
# ------------------------------------------------------------------
import sys
py_path = '../../../../postprocessing/'
sys.path.insert(0, py_path)
py_path = '../../../../postprocessing/io_operations/'
sys.path.insert(0, py_path)
import cn_and_rdf_lmp as crl
import io_module as io
#
# Input
#
# RDF and CN intput file
rdf_file = '../nafion.rdf'
# Output file
out_file = 'rdf_cn_averaged.txt'
# Number of bins
nbins = 300
# Number of columns
ncols = 10
crl.compute_time_average(rdf_file, out_file, nbins, ncols)
| [
"cn_and_rdf_lmp.compute_time_average",
"sys.path.insert"
]
| [((225, 252), 'sys.path.insert', 'sys.path.insert', (['(0)', 'py_path'], {}), '(0, py_path)\n', (240, 252), False, 'import sys\n'), ((307, 334), 'sys.path.insert', 'sys.path.insert', (['(0)', 'py_path'], {}), '(0, py_path)\n', (322, 334), False, 'import sys\n'), ((565, 623), 'cn_and_rdf_lmp.compute_time_average', 'crl.compute_time_average', (['rdf_file', 'out_file', 'nbins', 'ncols'], {}), '(rdf_file, out_file, nbins, ncols)\n', (589, 623), True, 'import cn_and_rdf_lmp as crl\n')] |
"""
Test the integrations related to the internal interface implementation and the 'Interface' interface itself
"""
import pytest
from cppython_core.schema import InterfaceConfiguration
from pytest_cppython.plugin import InterfaceIntegrationTests
from cppython.console import ConsoleInterface
class TestCLIInterface(InterfaceIntegrationTests):
"""
The tests for our CLI interface
"""
@pytest.fixture(name="interface")
def fixture_interface(self):
"""
Override of the plugin provided interface fixture.
Returns:
ConsoleInterface -- The Interface object to use for the CPPython defined tests
"""
configuration = InterfaceConfiguration()
return ConsoleInterface(configuration)
| [
"pytest.fixture",
"cppython_core.schema.InterfaceConfiguration",
"cppython.console.ConsoleInterface"
]
| [((406, 438), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""interface"""'}), "(name='interface')\n", (420, 438), False, 'import pytest\n'), ((688, 712), 'cppython_core.schema.InterfaceConfiguration', 'InterfaceConfiguration', ([], {}), '()\n', (710, 712), False, 'from cppython_core.schema import InterfaceConfiguration\n'), ((728, 759), 'cppython.console.ConsoleInterface', 'ConsoleInterface', (['configuration'], {}), '(configuration)\n', (744, 759), False, 'from cppython.console import ConsoleInterface\n')] |
#@contact <NAME> (<EMAIL>), Georgia Institute of Technology
#@version 1.0
#@date 2021-08-17
#Influence-guided Data Augmentation for Neural Tensor Completion (DAIN)
#This software is free of charge under research purposes.
#For commercial purposes, please contact the main author.
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import argparse
import numpy as np
from dataset import TensorDataset
import torch.optim as optim
from model import MLP
import pandas as pd
import copy
import random
from sklearn.model_selection import train_test_split
import os
def parse_args():
parser = argparse.ArgumentParser(description="Run DAIN for the MLP architecture")
parser.add_argument('--path', nargs='?', default='data/synthetic_10K.tensor',
help='Input data path.')
parser.add_argument('--epochs', type=int, default=50,
help='Number of epochs.')
parser.add_argument('--batch_size', type=int, default=1024,
help='Batch size.')
parser.add_argument('--layers', nargs='?', default='[150,1024,1024,128]',
help="Size of each layer. Note that the first layer is the concatenation of tensor embeddings. So layers[0]/N (N=order) is the tensor embedding size.")
parser.add_argument('--lr', type=float, default=0.001,
help='Learning rate.')
parser.add_argument('--verbose', type=int, default=5,
help='Show performance per X iterations')
parser.add_argument('--gpu', type=str, default='0',
help='GPU number')
parser.add_argument('--output', type=str, default='demo.txt',
help = 'output name')
parser.add_argument('--train_ratio', type=float, default=0.9,
help = 'Ratio of training data')
return parser.parse_args()
def model_train_and_test(args, model, train_loader, val_loader,test_loader,first):
output_path = 'output/'+args.output
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr = args.lr)
device = model.device
min_val,min_test,min_epoch,final_model = 9999,9999,0,0
for epoch in range(args.epochs):
torch.cuda.empty_cache()
running_loss = 0.0
train_loss,valid_loss = 0,0
for i, data in enumerate(val_loader, 0):
inputs, labels, indices = data[0].to(device), data[1].to(device),data[2]
outputs = model(inputs).flatten()
if first==True:
inter = model.intermediate.cpu().detach().clone()
error = (outputs - labels).reshape(-1,1).cpu().detach().clone()
model.allgrad[epoch,indices,:] = torch.mul(inter,error)
loss = criterion(outputs,labels)
loss.backward()
valid_loss += loss.item()
del inputs,labels,outputs,model.intermediate
valid_loss /= (i+1)
test_loss, test_accuracy = 0,0
for i, data in enumerate(test_loader, 0):
inputs, labels,indices = data[0].to(device), data[1].to(device),data[2]
prediction = model(inputs).flatten()
loss = criterion(prediction,labels)
loss.backward()
test_accuracy += torch.sum(torch.pow((prediction-labels),2)).cpu().item()
del inputs,labels,prediction,model.intermediate
test_accuracy/=len(test_loader.dataset)
for i, data in enumerate(train_loader, 0):
inputs, labels,indices = data[0].to(device), data[1].to(device),data[2]
optimizer.zero_grad()
outputs = model(inputs).flatten()
if first==True:
inter = model.intermediate.cpu().detach().clone()
error = (outputs-labels).reshape(-1,1).cpu().detach().clone()
model.allgrad[epoch,indices,:] = torch.mul(inter,error)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
train_loss += loss.item()
del inputs, labels, outputs,indices,model.intermediate
train_loss /= (i+1)
if epoch%args.verbose==0:
print('[%d] Train loss: %.3f\tValid loss = %.6f\t(Test RMSE = %.6f)\t' % (epoch + 1, train_loss, valid_loss,test_accuracy))
print('[%d] Train loss: %.3f\tValid loss = %.6f\t(Test RMSE = %.6f)\t' % (epoch + 1, train_loss, valid_loss,test_accuracy),file=open(output_path,"a"),flush=True)
if min_val<=valid_loss and epoch-min_epoch>=10:
break
if min_val>valid_loss:
min_val = valid_loss
min_test = test_accuracy
min_epoch = epoch
final_model = copy.deepcopy(model)
final_model.allgrad = copy.deepcopy(model.allgrad)
final_model.checkpoint = epoch+1
print('Finished Training\nFinal Test RMSE = {} @ (Epoch,validation loss) ({},{})\n'.format(min_test,min_epoch,min_val))
print('Finished Training\nFinal Test RMSE = {} @ (Epoch,validation loss) ({},{})\n'.format(min_test,min_epoch,min_val), file=open(output_path, "a"),flush=True)
del model
return min_test,final_model
def data_augmentation(trainset,new_tensor,new_val,val_loader,test_loader,args,device):
#Step 4: data augmentation
if new_tensor.shape[0]!=0:
cur_trainset = copy.deepcopy(trainset)
new_indices = torch.zeros(new_tensor.shape[0]).long()
cur_trainset.add(new_tensor,new_val,new_indices)
first = False
#Step 1: tensor embedding learning
else:
cur_trainset = copy.deepcopy(trainset)
first = True
layers = eval(args.layers)
train_loader = DataLoader(cur_trainset, batch_size=args.batch_size,shuffle=True)
model = MLP(cur_trainset, device, layers=layers).to(device)
model.allgrad = []
if first==True:
model.allgrad = torch.zeros(int(args.epochs),len(cur_trainset)+len(val_loader.dataset)+len(test_loader.dataset),model.last_size)
test_rmse,final_model = model_train_and_test(args, model, train_loader, val_loader, test_loader,first)
del cur_trainset
if new_tensor.shape[0]!=0:
del new_tensor
if new_val.shape[0]!=0:
del new_val
del model
if first==True:
print('[DONE] Step 1: tensor embedding learning')
#Step 2: cell importance calculation
train_idx,val_idx,test_idx = train_loader.dataset.indices,val_loader.dataset.indices,test_loader.dataset.indices
checkpoint = final_model.checkpoint
val_grad = torch.sum(final_model.allgrad[:checkpoint,val_idx,:],dim=1).squeeze()
maxv,maxp = -9999,0
final_model.importance = np.zeros(len(trainset))
for (i,idx) in enumerate(trainset.indices):
train_grad = final_model.allgrad[:checkpoint,idx,:].squeeze()
contribution = torch.mul(train_grad,val_grad)
final_contribution = torch.sum(torch.sum(contribution,dim=1),dim=0).item()
final_model.importance[i] = final_contribution
final_model.importance = final_model.importance / max(final_model.importance)
return (test_rmse,final_model)
def main():
args = parse_args()
path = args.path
layers = eval(args.layers)
learning_rate = args.lr
batch_size = args.batch_size
epochs = args.epochs
verbose = args.verbose
output_path = 'output/'+args.output
if os.path.exists('output/')==False:
os.mkdir('output/')
dataset = TensorDataset(path)
trainset,valset, testset,indices = copy.deepcopy(dataset),copy.deepcopy(dataset),copy.deepcopy(dataset),np.arange(dataset.num_data)
data_train, data_test, labels_train, labels_test, index_train, index_test = train_test_split(dataset.tensor.numpy(), dataset.val.numpy(), indices, test_size=1-args.train_ratio)
data_train, data_val, labels_train, labels_val, index_train, index_val = train_test_split(data_train, labels_train, index_train, test_size=0.2)
trainset.tensor,trainset.val,trainset.num_data,trainset.indices = torch.from_numpy(data_train).long(),torch.from_numpy(labels_train).float(),data_train.shape[0],torch.from_numpy(index_train).long()
valset.tensor,valset.val,valset.num_data,valset.indices = torch.from_numpy(data_val).long(),torch.from_numpy(labels_val).float(),data_val.shape[0],torch.from_numpy(index_val).long()
testset.tensor, testset.val, testset.num_data,testset.indices = torch.from_numpy(data_test).long(), torch.from_numpy(labels_test).float(), data_test.shape[0],torch.from_numpy(index_test).long()
train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(valset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(testset, batch_size=batch_size, shuffle=True)
print('[DONE] Step 0: Dataset loading & train-val-test split')
print(dataset.dimensionality)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
# CUDA for PyTorch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
#Step 1&2. Train tensor embeddings & calculate cell importance
(rmse,model) = data_augmentation(trainset,torch.empty(0),torch.empty(0),val_loader,test_loader,args,device)
print('Test RMSE before 50% data augmentation = {}'.format(rmse))
print('Test RMSE before 50% data augmentation = {}'.format(rmse),file=open(output_path,"a"))
original = copy.deepcopy(model)
del model
cell_importance = abs(original.importance)
print('[DONE] Step 2: cell importance calculation')
#Step 3. entity importance calculation
entity_importance = [np.zeros(dataset.dimensionality[i]) for i in range(dataset.order)]
for i in range(len(cell_importance)):
for j in range(dataset.order):
entity = int(trainset.tensor[i,j])
entity_importance[j][entity] += cell_importance[i]
for i in range(dataset.order):
cur = entity_importance[i]
entity_importance[i] = cur/sum(cur)
print('[DONE] Step 3: entity importance calculation')
num_aug = int(0.5 * trainset.tensor.shape[0])
print('Number of augmented data = {}\tTotal number of training data = {}'.format(num_aug,num_aug+len(trainset)))
print('Number of augmented data = {}\tTotal number of training data = {}'.format(num_aug,num_aug+len(trainset)), file=open(output_path, "a"),flush=True)
#Step 4. perform data augmentation
indices = np.zeros((num_aug,trainset.order))
for i in range(dataset.order):
indices[:,i] = np.random.choice(list(range(0,dataset.dimensionality[i])),size=num_aug,p = entity_importance[i])
new_tensor = torch.from_numpy(indices).long()
new_val = original.predict(new_tensor)
print('[DONE] Step 4: data augmentation with entity importance')
(rmse,model) = data_augmentation(trainset,new_tensor,new_val,val_loader,test_loader,args,device)
print('Test RMSE after 50% data augmentation = {}'.format(rmse))
print('Test RMSE after 50% data augmentation = {}'.format(rmse),file=open(output_path,"a"))
del model
if __name__ == "__main__":
main()
| [
"torch.mul",
"model.MLP",
"torch.from_numpy",
"torch.pow",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.sum",
"copy.deepcopy",
"numpy.arange",
"os.path.exists",
"argparse.ArgumentParser",
"os.mkdir",
"sklearn.model_selection.train_test_split",
"torch.empty",
"torch.cuda.empty_cache",
"numpy.zeros",
"torch.utils.data.DataLoader",
"dataset.TensorDataset",
"torch.zeros"
]
| [((636, 708), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run DAIN for the MLP architecture"""'}), "(description='Run DAIN for the MLP architecture')\n", (659, 708), False, 'import argparse\n'), ((2034, 2046), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2044, 2046), False, 'from torch import nn\n'), ((5710, 5776), 'torch.utils.data.DataLoader', 'DataLoader', (['cur_trainset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(cur_trainset, batch_size=args.batch_size, shuffle=True)\n', (5720, 5776), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((7519, 7538), 'dataset.TensorDataset', 'TensorDataset', (['path'], {}), '(path)\n', (7532, 7538), False, 'from dataset import TensorDataset\n'), ((7934, 8004), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data_train', 'labels_train', 'index_train'], {'test_size': '(0.2)'}), '(data_train, labels_train, index_train, test_size=0.2)\n', (7950, 8004), False, 'from sklearn.model_selection import train_test_split\n'), ((8611, 8668), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(trainset, batch_size=batch_size, shuffle=True)\n', (8621, 8668), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((8686, 8741), 'torch.utils.data.DataLoader', 'DataLoader', (['valset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(valset, batch_size=batch_size, shuffle=True)\n', (8696, 8741), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((8760, 8816), 'torch.utils.data.DataLoader', 'DataLoader', (['testset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(testset, batch_size=batch_size, shuffle=True)\n', (8770, 8816), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((9504, 9524), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (9517, 9524), False, 'import copy\n'), ((10528, 10563), 'numpy.zeros', 'np.zeros', (['(num_aug, trainset.order)'], {}), '((num_aug, trainset.order))\n', (10536, 10563), True, 'import numpy as np\n'), ((2240, 2264), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (2262, 2264), False, 'import torch\n'), ((5377, 5400), 'copy.deepcopy', 'copy.deepcopy', (['trainset'], {}), '(trainset)\n', (5390, 5400), False, 'import copy\n'), ((5614, 5637), 'copy.deepcopy', 'copy.deepcopy', (['trainset'], {}), '(trainset)\n', (5627, 5637), False, 'import copy\n'), ((7438, 7463), 'os.path.exists', 'os.path.exists', (['"""output/"""'], {}), "('output/')\n", (7452, 7463), False, 'import os\n'), ((7480, 7499), 'os.mkdir', 'os.mkdir', (['"""output/"""'], {}), "('output/')\n", (7488, 7499), False, 'import os\n'), ((7578, 7600), 'copy.deepcopy', 'copy.deepcopy', (['dataset'], {}), '(dataset)\n', (7591, 7600), False, 'import copy\n'), ((7601, 7623), 'copy.deepcopy', 'copy.deepcopy', (['dataset'], {}), '(dataset)\n', (7614, 7623), False, 'import copy\n'), ((7624, 7646), 'copy.deepcopy', 'copy.deepcopy', (['dataset'], {}), '(dataset)\n', (7637, 7646), False, 'import copy\n'), ((7647, 7674), 'numpy.arange', 'np.arange', (['dataset.num_data'], {}), '(dataset.num_data)\n', (7656, 7674), True, 'import numpy as np\n'), ((9256, 9270), 'torch.empty', 'torch.empty', (['(0)'], {}), '(0)\n', (9267, 9270), False, 'import torch\n'), ((9271, 9285), 'torch.empty', 'torch.empty', (['(0)'], {}), '(0)\n', (9282, 9285), False, 'import torch\n'), ((9712, 9747), 'numpy.zeros', 'np.zeros', (['dataset.dimensionality[i]'], {}), 
'(dataset.dimensionality[i])\n', (9720, 9747), True, 'import numpy as np\n'), ((4733, 4753), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (4746, 4753), False, 'import copy\n'), ((4788, 4816), 'copy.deepcopy', 'copy.deepcopy', (['model.allgrad'], {}), '(model.allgrad)\n', (4801, 4816), False, 'import copy\n'), ((5788, 5828), 'model.MLP', 'MLP', (['cur_trainset', 'device'], {'layers': 'layers'}), '(cur_trainset, device, layers=layers)\n', (5791, 5828), False, 'from model import MLP\n'), ((6887, 6918), 'torch.mul', 'torch.mul', (['train_grad', 'val_grad'], {}), '(train_grad, val_grad)\n', (6896, 6918), False, 'import torch\n'), ((9082, 9107), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9105, 9107), False, 'import torch\n'), ((10736, 10761), 'torch.from_numpy', 'torch.from_numpy', (['indices'], {}), '(indices)\n', (10752, 10761), False, 'import torch\n'), ((2733, 2756), 'torch.mul', 'torch.mul', (['inter', 'error'], {}), '(inter, error)\n', (2742, 2756), False, 'import torch\n'), ((3887, 3910), 'torch.mul', 'torch.mul', (['inter', 'error'], {}), '(inter, error)\n', (3896, 3910), False, 'import torch\n'), ((5423, 5455), 'torch.zeros', 'torch.zeros', (['new_tensor.shape[0]'], {}), '(new_tensor.shape[0])\n', (5434, 5455), False, 'import torch\n'), ((6579, 6641), 'torch.sum', 'torch.sum', (['final_model.allgrad[:checkpoint, val_idx, :]'], {'dim': '(1)'}), '(final_model.allgrad[:checkpoint, val_idx, :], dim=1)\n', (6588, 6641), False, 'import torch\n'), ((8075, 8103), 'torch.from_numpy', 'torch.from_numpy', (['data_train'], {}), '(data_train)\n', (8091, 8103), False, 'import torch\n'), ((8111, 8141), 'torch.from_numpy', 'torch.from_numpy', (['labels_train'], {}), '(labels_train)\n', (8127, 8141), False, 'import torch\n'), ((8170, 8199), 'torch.from_numpy', 'torch.from_numpy', (['index_train'], {}), '(index_train)\n', (8186, 8199), False, 'import torch\n'), ((8269, 8295), 'torch.from_numpy', 'torch.from_numpy', (['data_val'], {}), '(data_val)\n', (8285, 8295), False, 'import torch\n'), ((8303, 8331), 'torch.from_numpy', 'torch.from_numpy', (['labels_val'], {}), '(labels_val)\n', (8319, 8331), False, 'import torch\n'), ((8358, 8385), 'torch.from_numpy', 'torch.from_numpy', (['index_val'], {}), '(index_val)\n', (8374, 8385), False, 'import torch\n'), ((8461, 8488), 'torch.from_numpy', 'torch.from_numpy', (['data_test'], {}), '(data_test)\n', (8477, 8488), False, 'import torch\n'), ((8497, 8526), 'torch.from_numpy', 'torch.from_numpy', (['labels_test'], {}), '(labels_test)\n', (8513, 8526), False, 'import torch\n'), ((8555, 8583), 'torch.from_numpy', 'torch.from_numpy', (['index_test'], {}), '(index_test)\n', (8571, 8583), False, 'import torch\n'), ((6961, 6991), 'torch.sum', 'torch.sum', (['contribution'], {'dim': '(1)'}), '(contribution, dim=1)\n', (6970, 6991), False, 'import torch\n'), ((3290, 3323), 'torch.pow', 'torch.pow', (['(prediction - labels)', '(2)'], {}), '(prediction - labels, 2)\n', (3299, 3323), False, 'import torch\n')] |
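For context, the DAIN training script above is configured entirely through its argparse flags; a representative invocation (assuming the file is saved as main.py, and simply restating the defaults already encoded in parse_args) would be:

    python main.py --path data/synthetic_10K.tensor --epochs 50 --batch_size 1024 --layers "[150,1024,1024,128]" --lr 0.001 --gpu 0 --output demo.txt --train_ratio 0.9

Results are appended to output/demo.txt, and the script reports the test RMSE before and after augmenting the training set by 50%.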
from datetime import datetime, timedelta
import jwt
from flask import current_app
from app import db
from app.user.repository import UserRepository
class AuthService:
def __init__(self) -> None:
self._user_repository = UserRepository(db.session)
def create_token(self, data) -> dict:
user = self._user_repository.find_one(user_id=data["user_id"])
if user is None:
# user not found
raise RuntimeError
if not user.check_password(data["password"]):
            # wrong password
raise RuntimeError
access_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(minutes=60),
"user_id": str(user.id),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
refresh_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(hours=4),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
return {"access_token": access_token, "refresh_token": refresh_token}
def validate_token(self, token) -> dict:
return jwt.decode(token, current_app.config["SECRET_KEY"], algorithms=["HS512"])
def refresh_token(self, token) -> dict:
payload = self.validate_token(token)
user = self._user_repository.find_one(id=payload["user_id"])
if user is None:
# user not found
raise RuntimeError
access_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(minutes=60),
"user_id": str(user.id),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
return {"access_token": access_token}
| [
"jwt.decode",
"datetime.timedelta",
"app.user.repository.UserRepository",
"datetime.datetime.utcnow"
]
| [((235, 261), 'app.user.repository.UserRepository', 'UserRepository', (['db.session'], {}), '(db.session)\n', (249, 261), False, 'from app.user.repository import UserRepository\n'), ((1267, 1340), 'jwt.decode', 'jwt.decode', (['token', "current_app.config['SECRET_KEY']"], {'algorithms': "['HS512']"}), "(token, current_app.config['SECRET_KEY'], algorithms=['HS512'])\n", (1277, 1340), False, 'import jwt\n'), ((643, 660), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (658, 660), False, 'from datetime import datetime, timedelta\n'), ((944, 961), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (959, 961), False, 'from datetime import datetime, timedelta\n'), ((1658, 1675), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1673, 1675), False, 'from datetime import datetime, timedelta\n'), ((685, 702), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (700, 702), False, 'from datetime import datetime, timedelta\n'), ((705, 726), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(60)'}), '(minutes=60)\n', (714, 726), False, 'from datetime import datetime, timedelta\n'), ((986, 1003), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1001, 1003), False, 'from datetime import datetime, timedelta\n'), ((1006, 1024), 'datetime.timedelta', 'timedelta', ([], {'hours': '(4)'}), '(hours=4)\n', (1015, 1024), False, 'from datetime import datetime, timedelta\n'), ((1700, 1717), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1715, 1717), False, 'from datetime import datetime, timedelta\n'), ((1720, 1741), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(60)'}), '(minutes=60)\n', (1729, 1741), False, 'from datetime import datetime, timedelta\n')] |
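For illustration, the encode/decode round trip used by AuthService above can be reproduced with PyJWT alone; the secret below is an assumed stand-in for current_app.config["SECRET_KEY"] and is not taken from the project:

from datetime import datetime, timedelta
import jwt

secret = "replace-me"  # placeholder for the Flask SECRET_KEY
access_token = jwt.encode(
    {"iat": datetime.utcnow(), "exp": datetime.utcnow() + timedelta(minutes=60), "user_id": "42"},
    secret,
    algorithm="HS512",
)
# Decoding verifies the signature and raises jwt.ExpiredSignatureError once "exp" has passed.
claims = jwt.decode(access_token, secret, algorithms=["HS512"])
print(claims["user_id"])  # -> "42"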
import os
from Bio import AlignIO, Phylo
from Bio.Phylo.TreeConstruction import DistanceCalculator, DistanceTreeConstructor
class Phylogenetic:
def __init__(self, PATH):
self.PATH=PATH
def binary_sequence_generator(self, input_kmer_pattern, label):
string_inp="".join([ 'A' if x==0 else 'C' for x in input_kmer_pattern])
return([">"+label,string_inp])
def multifasta_fille_generator(self, converted_sequences_phyolgenetic):
file_output = open(os.path.join(self.PATH,"binary_presence_absence_kmers.fasta"), "w")
file_output.writelines('\n'.join(converted_sequences_phyolgenetic) + '\n' )
file_output.close()
def distance_matrix_generator(self):
align = AlignIO.read(os.path.join(self.PATH,"binary_presence_absence_kmers.fasta"), "fasta")
calculator = DistanceCalculator('identity')
distMatrix = calculator.get_distance(align)
return(distMatrix)
def distance_tree_file_generator(self,distance_matrix):
constructor = DistanceTreeConstructor()
UPGMATree = constructor.upgma(distance_matrix)
        Phylo.write(UPGMATree, os.path.join(self.PATH,"binary_presence_absence_kmers.tre") , "newick")
 | [
"Bio.Phylo.TreeConstruction.DistanceTreeConstructor",
"os.path.join",
"Bio.Phylo.TreeConstruction.DistanceCalculator"
]
| [((850, 880), 'Bio.Phylo.TreeConstruction.DistanceCalculator', 'DistanceCalculator', (['"""identity"""'], {}), "('identity')\n", (868, 880), False, 'from Bio.Phylo.TreeConstruction import DistanceCalculator, DistanceTreeConstructor\n'), ((1047, 1072), 'Bio.Phylo.TreeConstruction.DistanceTreeConstructor', 'DistanceTreeConstructor', ([], {}), '()\n', (1070, 1072), False, 'from Bio.Phylo.TreeConstruction import DistanceCalculator, DistanceTreeConstructor\n'), ((494, 556), 'os.path.join', 'os.path.join', (['self.PATH', '"""binary_presence_absence_kmers.fasta"""'], {}), "(self.PATH, 'binary_presence_absence_kmers.fasta')\n", (506, 556), False, 'import os\n'), ((757, 819), 'os.path.join', 'os.path.join', (['self.PATH', '"""binary_presence_absence_kmers.fasta"""'], {}), "(self.PATH, 'binary_presence_absence_kmers.fasta')\n", (769, 819), False, 'import os\n'), ((1159, 1219), 'os.path.join', 'os.path.join', (['self.PATH', '"""binary_presence_absence_kmers.tre"""'], {}), "(self.PATH, 'binary_presence_absence_kmers.tre')\n", (1171, 1219), False, 'import os\n')] |
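A hypothetical end-to-end use of the Phylogenetic helper above; the k-mer patterns, labels, and output directory are invented for illustration, and the directory must already exist:

phylo = Phylogenetic("results")
records = []
records += phylo.binary_sequence_generator([0, 1, 1, 0, 1], "genome_A")
records += phylo.binary_sequence_generator([1, 1, 0, 0, 1], "genome_B")
records += phylo.binary_sequence_generator([0, 0, 1, 0, 1], "genome_C")
phylo.multifasta_fille_generator(records)   # writes the presence/absence FASTA
dist = phylo.distance_matrix_generator()    # identity-based distance matrix
phylo.distance_tree_file_generator(dist)    # writes the UPGMA tree in Newick format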
import torch
import functools
if torch.__version__.startswith('0'):
from .sync_bn.inplace_abn.bn import InPlaceABNSync
BatchNorm2d = functools.partial(InPlaceABNSync, activation='none')
BatchNorm2d_class = InPlaceABNSync
relu_inplace = False
else:
# BatchNorm2d_class = BatchNorm2d = torch.nn.SyncBatchNorm
BatchNorm2d_class = BatchNorm2d = torch.nn.BatchNorm2d
    relu_inplace = True
 | [
"torch.__version__.startswith",
"functools.partial"
]
| [((34, 67), 'torch.__version__.startswith', 'torch.__version__.startswith', (['"""0"""'], {}), "('0')\n", (62, 67), False, 'import torch\n'), ((142, 194), 'functools.partial', 'functools.partial', (['InPlaceABNSync'], {'activation': '"""none"""'}), "(InPlaceABNSync, activation='none')\n", (159, 194), False, 'import functools\n')] |
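Illustrative (assumed) use of the exported names; on a modern PyTorch these resolve to torch.nn.BatchNorm2d with in-place ReLU enabled:

import torch.nn as nn
norm = BatchNorm2d(64)                 # 64-channel batch norm from the shim above
act = nn.ReLU(inplace=relu_inplace)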
import os
import sys
import numpy as np
import pandas as pd
def get_columns_percent_dataframe(df: pd.DataFrame, totals_column=None, percent_names=True) -> pd.DataFrame:
""" @param totals_column: (default = use sum of columns)
@param percent_names: Rename names from 'col' => 'col %'
Return a dataframe as a percentage of totals_column if provided, or sum of columns """
percent_df = pd.DataFrame(index=df.index)
columns = df.columns
if totals_column:
totals_series = df[totals_column]
        columns = columns.drop(totals_column)  # exclude the totals column itself
else:
totals_series = df.sum(axis=1)
for col in columns:
new_col = col
if percent_names:
new_col = f"{new_col} %"
multiplier = 100.0 # to get percent
percent_df[new_col] = multiplier * df[col] / totals_series
return percent_df
def get_rows_percent_dataframe(df: pd.DataFrame) -> pd.DataFrame:
""" Return a dataframe as a percentage of sum of rows """
row_sums = df.sum(axis=0)
return df.multiply(100.0) / row_sums
def get_total_percent_dataframe(df: pd.DataFrame) -> pd.DataFrame:
""" Return a dataframe as a percentage of sum of rows """
total = df.sum(axis=0).sum()
return df.multiply(100.0) / total
def df_handle_below_minimum_floats(df: pd.DataFrame) -> pd.DataFrame:
def handle_if_below_min(series):
if series.dtype == 'd':
too_small_mask = abs(series) < sys.float_info.min
series[too_small_mask] = sys.float_info.min
return series
return df.apply(handle_if_below_min, axis=0)
def nan_to_none(val):
if np.isnan(val):
val = None
return val
def df_nan_to_none(df: pd.DataFrame) -> pd.DataFrame:
return df.where((pd.notnull(df)), None)
def df_replace_nan(df: pd.DataFrame, nan_replace='') -> pd.DataFrame:
return df.where((pd.notnull(df)), nan_replace)
def read_csv_skip_header(fle, header='#', **kwargs) -> pd.DataFrame:
if os.stat(fle).st_size == 0:
raise ValueError("File is empty")
with open(fle) as f:
pos = 0
cur_line = f.readline()
while cur_line.startswith(header):
pos = f.tell()
cur_line = f.readline()
f.seek(pos)
return pd.read_csv(f, **kwargs)
| [
"pandas.read_csv",
"numpy.isnan",
"pandas.DataFrame",
"os.stat",
"pandas.notnull"
]
| [((407, 435), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df.index'}), '(index=df.index)\n', (419, 435), True, 'import pandas as pd\n'), ((1630, 1643), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (1638, 1643), True, 'import numpy as np\n'), ((1756, 1770), 'pandas.notnull', 'pd.notnull', (['df'], {}), '(df)\n', (1766, 1770), True, 'import pandas as pd\n'), ((1872, 1886), 'pandas.notnull', 'pd.notnull', (['df'], {}), '(df)\n', (1882, 1886), True, 'import pandas as pd\n'), ((2263, 2287), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f, **kwargs)\n', (2274, 2287), True, 'import pandas as pd\n'), ((1980, 1992), 'os.stat', 'os.stat', (['fle'], {}), '(fle)\n', (1987, 1992), False, 'import os\n')] |
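A small, assumed example of get_columns_percent_dataframe from the module above; when no totals_column is given, each row is scaled by its own sum:

import pandas as pd

counts = pd.DataFrame({"pass": [8, 2], "fail": [2, 8]}, index=["run1", "run2"])
print(get_columns_percent_dataframe(counts))
# run1 -> pass % 80.0, fail % 20.0
# run2 -> pass % 20.0, fail % 80.0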
from SemiBin.main import generate_data_single
import os
import pytest
import logging
import pandas as pd
def test_generate_data_coassembly():
logger = logging.getLogger('SemiBin')
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
logger.addHandler(sh)
os.makedirs('output_coassembly',exist_ok=True)
generate_data_single(bams=['test/coassembly_sample_data/input.sorted1.bam',
'test/coassembly_sample_data/input.sorted2.bam',
'test/coassembly_sample_data/input.sorted3.bam',
'test/coassembly_sample_data/input.sorted4.bam',
'test/coassembly_sample_data/input.sorted5.bam'],
num_process=1,
logger=logger,
output='output_coassembly',
handle='test/coassembly_sample_data/input.fasta',
binned_short=False,
must_link_threshold=4000
)
data = pd.read_csv('output_coassembly/data.csv',index_col=0)
data_split = pd.read_csv('output_coassembly/data_split.csv',index_col=0)
assert data.shape == (40,141)
    assert data_split.shape == (80,141)
 | [
"logging.getLogger",
"logging.StreamHandler",
"pandas.read_csv",
"os.makedirs",
"logging.Formatter",
"SemiBin.main.generate_data_single"
]
| [((156, 184), 'logging.getLogger', 'logging.getLogger', (['"""SemiBin"""'], {}), "('SemiBin')\n", (173, 184), False, 'import logging\n'), ((228, 251), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (249, 251), False, 'import logging\n'), ((351, 398), 'os.makedirs', 'os.makedirs', (['"""output_coassembly"""'], {'exist_ok': '(True)'}), "('output_coassembly', exist_ok=True)\n", (362, 398), False, 'import os\n'), ((402, 858), 'SemiBin.main.generate_data_single', 'generate_data_single', ([], {'bams': "['test/coassembly_sample_data/input.sorted1.bam',\n 'test/coassembly_sample_data/input.sorted2.bam',\n 'test/coassembly_sample_data/input.sorted3.bam',\n 'test/coassembly_sample_data/input.sorted4.bam',\n 'test/coassembly_sample_data/input.sorted5.bam']", 'num_process': '(1)', 'logger': 'logger', 'output': '"""output_coassembly"""', 'handle': '"""test/coassembly_sample_data/input.fasta"""', 'binned_short': '(False)', 'must_link_threshold': '(4000)'}), "(bams=['test/coassembly_sample_data/input.sorted1.bam',\n 'test/coassembly_sample_data/input.sorted2.bam',\n 'test/coassembly_sample_data/input.sorted3.bam',\n 'test/coassembly_sample_data/input.sorted4.bam',\n 'test/coassembly_sample_data/input.sorted5.bam'], num_process=1, logger\n =logger, output='output_coassembly', handle=\n 'test/coassembly_sample_data/input.fasta', binned_short=False,\n must_link_threshold=4000)\n", (422, 858), False, 'from SemiBin.main import generate_data_single\n'), ((1141, 1195), 'pandas.read_csv', 'pd.read_csv', (['"""output_coassembly/data.csv"""'], {'index_col': '(0)'}), "('output_coassembly/data.csv', index_col=0)\n", (1152, 1195), True, 'import pandas as pd\n'), ((1212, 1272), 'pandas.read_csv', 'pd.read_csv', (['"""output_coassembly/data_split.csv"""'], {'index_col': '(0)'}), "('output_coassembly/data_split.csv', index_col=0)\n", (1223, 1272), True, 'import pandas as pd\n'), ((272, 318), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(message)s"""'], {}), "('%(asctime)s - %(message)s')\n", (289, 318), False, 'import logging\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from cx_Freeze import setup,Executable
icondata='icon.ico'
base = None
# On Windows, use the GUI base (enable GUI, disable the console/CUI)
if sys.platform == 'win32' : base = 'win32GUI'
exe = Executable(script = 'main.py',
base = base,
#icon=icondata
)
setup(name = 'MSman',
version = '0.1',
description = 'Minecraft Server Manager',
executables = [exe]
      )
 | [
"cx_Freeze.Executable",
"cx_Freeze.setup"
]
| [((209, 248), 'cx_Freeze.Executable', 'Executable', ([], {'script': '"""main.py"""', 'base': 'base'}), "(script='main.py', base=base)\n", (219, 248), False, 'from cx_Freeze import setup, Executable\n'), ((326, 423), 'cx_Freeze.setup', 'setup', ([], {'name': '"""MSman"""', 'version': '"""0.1"""', 'description': '"""Minecraft Server Manager"""', 'executables': '[exe]'}), "(name='MSman', version='0.1', description='Minecraft Server Manager',\n executables=[exe])\n", (331, 423), False, 'from cx_Freeze import setup, Executable\n')] |
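For reference, the frozen Windows executable described by this setup script is normally produced with the standard cx_Freeze build step, run from the directory containing main.py:

    python setup.py build

The result is placed under build/, with the console window suppressed because of the win32GUI base selected above.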
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import shellmodelutilities as smutil
# Set bin width and range
bin_width = 0.20
Emax = 14
Nbins = int(np.ceil(Emax/bin_width))
Emax_adjusted = bin_width*Nbins # Trick to get an integer number of bins
bins = np.linspace(0,Emax_adjusted,Nbins+1)
# Define list of calculation input files and corresponding label names
inputfile = "summary_Zn70_jun45.txt"
# Instantiate figure which we will fill
f_rho, ax_rho = plt.subplots(1,1)
# Read energy levels from file
levels = smutil.read_energy_levels(inputfile)
# Choose which [2*J,pi] combinations to include in partial level density plot
Jpi_list = [[0,-1],[2,-1],[4,-1],[6,-1],[8,-1],[10,-1],[12,-1],[14,-1],[16,-1],[18,-1],[20,-1],[22,-1],[24,-1],[26,-1],[28,-1],
[0,+1],[2,+1],[4,+1],[6,+1],[8,+1],[10,+1],[12,+1],[14,+1],[16,+1],[18,+1],[20,+1],[22,+1],[24,+1],[26,+1],[28,+1]]
# Allocate (Ex,Jpi) matrix to store partial level density
rho_ExJpi = np.zeros((Nbins,len(Jpi_list)))
# Count number of levels for each (Ex, J, pi) pixel.
Egs = levels[0,0] # Ground state energy
for i_l in range(len(levels[:,0])):
E, J, pi = levels[i_l]
# Skip if level is outside range:
if E-Egs >= Emax:
continue
i_Ex = int(np.floor((E-Egs)/bin_width))
try:
i_Jpi = Jpi_list.index([J,pi])
except:
continue
rho_ExJpi[i_Ex,i_Jpi] += 1
rho_ExJpi /= bin_width # Normalize to bin width, to get density in MeV^-1
# Plot it
from matplotlib.colors import LogNorm # To get log scaling on the z axis
colorbar_object = ax_rho.pcolormesh(np.linspace(0,len(Jpi_list)-1,len(Jpi_list)), bins, rho_ExJpi, norm=LogNorm())
f_rho.colorbar(colorbar_object) # Add colorbar to plot
# Make the plot nice
ax_rho.set_xlabel(r"$\pi\cdot J\,\mathrm{(\hbar)}$")
ax_rho.set_ylabel(r'$E_x \, \mathrm{(MeV)}$')
# A bit of Python voodoo to get the x labels right:
Jpi_array = np.append(np.linspace(0,-int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2)),np.linspace(0,int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2))) # Array of pi*J for plot
def format_func(value, tick_number):
if value >= 0 and value <= 28:
return int(Jpi_array[int(value)])
else:
return None
ax_rho.set_xlim([0,29])
ax_rho.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax_rho.set_xticks([0,2,4,6,8,10,12,14,15,17,19,21,23,25,27])
# Show plot
plt.show()
| [
"shellmodelutilities.read_energy_levels",
"numpy.ceil",
"numpy.floor",
"numpy.linspace",
"matplotlib.pyplot.FuncFormatter",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
]
| [((295, 335), 'numpy.linspace', 'np.linspace', (['(0)', 'Emax_adjusted', '(Nbins + 1)'], {}), '(0, Emax_adjusted, Nbins + 1)\n', (306, 335), True, 'import numpy as np\n'), ((498, 516), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (510, 516), True, 'import matplotlib.pyplot as plt\n'), ((557, 593), 'shellmodelutilities.read_energy_levels', 'smutil.read_energy_levels', (['inputfile'], {}), '(inputfile)\n', (582, 593), True, 'import shellmodelutilities as smutil\n'), ((2399, 2409), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2407, 2409), True, 'import matplotlib.pyplot as plt\n'), ((190, 215), 'numpy.ceil', 'np.ceil', (['(Emax / bin_width)'], {}), '(Emax / bin_width)\n', (197, 215), True, 'import numpy as np\n'), ((2293, 2323), 'matplotlib.pyplot.FuncFormatter', 'plt.FuncFormatter', (['format_func'], {}), '(format_func)\n', (2310, 2323), True, 'import matplotlib.pyplot as plt\n'), ((1280, 1311), 'numpy.floor', 'np.floor', (['((E - Egs) / bin_width)'], {}), '((E - Egs) / bin_width)\n', (1288, 1311), True, 'import numpy as np\n'), ((1681, 1690), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (1688, 1690), False, 'from matplotlib.colors import LogNorm\n')] |
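As a quick check of the binning used above (not part of the original script): with bin_width = 0.20 MeV, a level lying 1.07 MeV above the ground state falls in bin index 5, i.e. the 1.0-1.2 MeV bin.

import numpy as np
print(int(np.floor(1.07 / 0.20)))  # -> 5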
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
sbpy bandpass Module
"""
__all__ = [
'bandpass'
]
import os
from astropy.utils.data import get_pkg_data_filename
def bandpass(name):
"""Retrieve bandpass transmission spectrum from sbpy.
Parameters
----------
name : string
Name of the bandpass, case insensitive. See notes for
available filters.
Returns
-------
bp : `~synphot.SpectralElement`
Notes
-----
Available filters:
+-------------+---------------------------+
| Name | Source |
+=============+===========================+
| 2MASS J | Cohen et al. 2003 |
+-------------+---------------------------+
| 2MASS H | Cohen et al. 2003 |
+-------------+---------------------------+
| 2MASS Ks | Cohen et al. 2003 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| PS1 g | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 r | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 i | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 w | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 y | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 z | Tonry et al. 2012 |
+-------------+---------------------------+
| SDSS u | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS g | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS r | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS i | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS z | SDSS, dated 2001 |
+-------------+---------------------------+
| WFC3 F438W | HST/WFC3 UVIS, v4 |
+-------------+---------------------------+
| WFC3 F606W | HST/WFC3 UVIS, v4 |
+-------------+---------------------------+
| WISE W1 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W2 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W3 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W4 | Jarrett et al. 2011 |
+-------------+---------------------------+
References
----------
.. [CDBS] Space Telescope Science Institute. HST Calibration Reference
Data System. https://hst-crds.stsci.edu/ .
.. [COH03] <NAME>. et al. 2003. Spectral Irradiance Calibration
in the Infrared. XIV. The Absolute Calibration of 2MASS. AJ
126, 1090.
.. [JAR11] <NAME>. et al. 2011. The Spitzer-WISE Survey of
the Ecliptic Poles. ApJ 735, 112.
.. [SDSS] Sloan Digital Sky Survey. Camera.
www.sdss.org/instruments/camera .
.. [TON12] <NAME>. et al. 2012. The Pan-STARRS1 Photometric
System. ApJ 750, 99.
"""
try:
import synphot
except ImportError:
raise ImportError('synphot is required.')
name2file = {
'2mass j': '2mass-j-rsr.txt',
'2mass h': '2mass-h-rsr.txt',
'2mass ks': '2mass-ks-rsr.txt',
'cousins r': 'cousins_r_004_syn.fits',
'cousins i': 'cousins_i_004_syn.fits',
'johnson u': 'johnson_u_004_syn.fits',
'johnson b': 'johnson_b_004_syn.fits',
'johnson v': 'johnson_v_004_syn.fits',
'ps1 g': 'ps1-gp1.txt',
'ps1 r': 'ps1-rp1.txt',
'ps1 i': 'ps1-ip1.txt',
'ps1 w': 'ps1-wp1.txt',
'ps1 y': 'ps1-yp1.txt',
'ps1 z': 'ps1-zp1.txt',
'sdss u': 'sdss-u.fits',
'sdss g': 'sdss-g.fits',
'sdss r': 'sdss-r.fits',
'sdss i': 'sdss-i.fits',
'sdss z': 'sdss-z.fits',
'wfc3 f438w': 'wfc3_uvis_f438w_004_syn.fits',
'wfc3 f606w': 'wfc3_uvis_f606w_004_syn.fits',
'wise w1': 'WISE-RSR-W1.EE.txt',
'wise w2': 'WISE-RSR-W2.EE.txt',
'wise w3': 'WISE-RSR-W3.EE.txt',
'wise w4': 'WISE-RSR-W4.EE.txt',
}
fn = get_pkg_data_filename(os.path.join(
'..', 'photometry', 'data', name2file[name.lower()]))
bp = synphot.SpectralElement.from_file(fn)
return bp
| [
"synphot.SpectralElement.from_file"
]
| [((4891, 4928), 'synphot.SpectralElement.from_file', 'synphot.SpectralElement.from_file', (['fn'], {}), '(fn)\n', (4924, 4928), False, 'import synphot\n')] |
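A hedged usage sketch for the bandpass() helper above; it requires synphot plus the packaged data files, and assumes the function is imported from its parent package so the relative data path resolves:

import astropy.units as u
bp = bandpass('PS1 r')
print(bp(6200 * u.AA))  # fractional throughput at a wavelength inside the r band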
from django.http import HttpResponse
from django.shortcuts import render, redirect
from community.models import Community
# Create your views here.
def search_basic(request):
communities = None
if request.POST:
community_query = request.POST.get('community_search', False)
communities = Community.objects.filter(city__icontains=community_query)
print(communities)
return render(request, 'search/search_basic.html', {'communities': communities})
return render(request, 'search/search_basic.html', {'communities': communities})
| [
"django.shortcuts.render",
"community.models.Community.objects.filter"
]
| [((499, 572), 'django.shortcuts.render', 'render', (['request', '"""search/search_basic.html"""', "{'communities': communities}"], {}), "(request, 'search/search_basic.html', {'communities': communities})\n", (505, 572), False, 'from django.shortcuts import render, redirect\n'), ((314, 371), 'community.models.Community.objects.filter', 'Community.objects.filter', ([], {'city__icontains': 'community_query'}), '(city__icontains=community_query)\n', (338, 371), False, 'from community.models import Community\n'), ((414, 487), 'django.shortcuts.render', 'render', (['request', '"""search/search_basic.html"""', "{'communities': communities}"], {}), "(request, 'search/search_basic.html', {'communities': communities})\n", (420, 487), False, 'from django.shortcuts import render, redirect\n')] |
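A minimal (assumed) URL configuration that would expose the view above; the route and name are illustrative, not taken from the project:

from django.urls import path
from . import views

urlpatterns = [
    path('search/', views.search_basic, name='search_basic'),
]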
# OxfordInstruments_ILM200.py class, to perform the communication between the Wrapper and the device
# Copyright (c) 2017 QuTech (Delft)
# Code is available under the available under the `MIT open-source license <https://opensource.org/licenses/MIT>`__
#
# <NAME> <<EMAIL>>, 2017
# <NAME> <<EMAIL>>, 2016
# <NAME> <<EMAIL>>, 2009
# <NAME> <<EMAIL>>, 2009
from time import sleep
import visa
import logging
from qcodes import VisaInstrument
class OxfordInstruments_ILM200(VisaInstrument):
"""
This is the qcodes driver for the Oxford Instruments ILM 200 Helium Level Meter.
Usage:
Initialize with
<name> = instruments.create('name', 'OxfordInstruments_ILM200', address='<Instrument address>')
<Instrument address> = ASRL4::INSTR
Note: Since the ISOBUS allows for several instruments to be managed in parallel, the command
which is sent to the device starts with '@n', where n is the ISOBUS instrument number.
"""
def __init__(self, name, address, number=1, **kwargs):
"""
Initializes the Oxford Instruments ILM 200 Helium Level Meter.
Args:
name (str): name of the instrument
address (str): instrument address
number (int): ISOBUS instrument number (number=1 is specific to the ILM in F008)
Returns:
None
"""
logging.debug(__name__ + ' : Initializing instrument')
super().__init__(name, address, **kwargs)
self.visa_handle.set_visa_attribute(visa.constants.VI_ATTR_ASRL_STOP_BITS,
visa.constants.VI_ASRL_STOP_TWO)
self._address = address
self._number = number
self._values = {}
self.add_parameter('level',
label='level',
get_cmd=self._do_get_level,
unit='%')
self.add_parameter('status',
get_cmd=self._do_get_status)
self.add_parameter('rate',
get_cmd=self._do_get_rate,
set_cmd=self._do_set_rate)
# a dummy command to avoid the initial error
try:
self.get_idn()
sleep(70e-3) # wait for the device to be able to respond
self._read() # to flush the buffer
except Exception as ex:
logging.debug(ex)
def _execute(self, message):
"""
Write a command to the device and read answer. This function writes to
the buffer by adding the device number at the front, instead of 'ask'.
Args:
message (str) : write command for the device
Returns:
None
"""
logging.info(
__name__ + ' : Send the following command to the device: %s' % message)
self.visa_handle.write('@%s%s' % (self._number, message))
sleep(70e-3) # wait for the device to be able to respond
result = self._read()
if result.find('?') >= 0:
print("Error: Command %s not recognized" % message)
else:
return result
def _read(self):
"""
Reads the total bytes in the buffer and outputs as a string.
Args:
None
Returns:
message (str)
"""
# because protocol has no termination chars the read reads the number
# of bytes in the buffer
bytes_in_buffer = self.visa_handle.bytes_in_buffer
        # a workaround for a timeout error in the pyvisa read_raw() function
with(self.visa_handle.ignore_warning(visa.constants.VI_SUCCESS_MAX_CNT)):
mes = self.visa_handle.visalib.read(
self.visa_handle.session, bytes_in_buffer)
# cannot be done on same line for some reason
mes = str(mes[0].decode())
return mes
def get_idn(self):
"""
Overrides the function of Instrument since ILM does not support `*IDN?`
This string is supposed to be a
comma-separated list of vendor, model, serial, and firmware, but
semicolon and colon are also common separators so we accept them here
as well.
Returns:
A dict containing vendor, model, serial, and firmware.
"""
try:
idstr = '' # in case self.ask fails
idstr = self._get_version().split()
# form is supposed to be comma-separated, but we've seen
# other separators occasionally
idparts = [idstr[3] + ' ' + idstr[4], idstr[0], idstr[5],
idstr[1] + ' ' + idstr[2]]
# in case parts at the end are missing, fill in None
if len(idparts) < 4:
idparts += [None] * (4 - len(idparts))
except Exception as ex:
logging.warn('Error getting or interpreting *IDN?: ' + repr(idstr))
logging.debug(ex)
idparts = [None, None, None, None]
return dict(zip(('vendor', 'model', 'serial', 'firmware'), idparts))
def get_all(self):
"""
Reads all implemented parameters from the instrument,
and updates the wrapper.
"""
logging.info(__name__ + ' : reading all settings from instrument')
self.level.get()
self.status.get()
self.rate.get()
def close(self):
"""
Safely close connection
"""
logging.info(__name__ + ' : Closing ILM200 connection')
self.local()
super().close()
# Functions: Monitor commands
def _get_version(self):
"""
Identify the device
Args:
None
Returns:
identification (str): should be 'ILM200 Version 1.08 (c) OXFORD 1994\r'
"""
logging.info(__name__ + ' : Identify the device')
return self._execute('V')
def _do_get_level(self):
"""
Get Helium level of channel 1.
Args:
None
Returns:
result (float) : Helium level
"""
logging.info(__name__ + ' : Read level of channel 1')
result = self._execute('R1')
return float(result.replace("R", "")) / 10
def _do_get_status(self):
"""
Get status of the device.
"""
logging.info(__name__ + ' : Get status of the device.')
result = self._execute('X')
usage = {
0: "Channel not in use",
1: "Channel used for Nitrogen level",
2: "Channel used for Helium Level (Normal pulsed operation)",
3: "Channel used for Helium Level (Continuous measurement)",
9: "Error on channel (Usually means probe unplugged)"
}
# current_flowing = {
# 0 : "Curent not flowing in Helium Probe Wire",
# 1 : "Curent not flowing in Helium Probe Wire"
# }
# auto_fill_status = {
# 00 : "End Fill (Level > FULL)",
# 01 : "Not Filling (Level < FULL, Level > FILL)",
# 10 : "Filling (Level < FULL, Level > FILL)",
# 11 : "Start Filling (Level < FILL)"
# }
return usage.get(int(result[1]), "Unknown")
def _do_get_rate(self):
"""
Get helium meter channel 1 probe rate
Input:
None
Output:
rate(int) :
0 : "SLOW"
1 : "FAST"
"""
rate = {
1: "1 : Helium Probe in FAST rate",
0: "0 : Helium Probe in SLOW rate"
}
result = self._execute('X')
return rate.get(int(format(int(result[5:7]), '08b')[6]), "Unknown")
def remote(self):
"""
Set control to remote & locked
"""
logging.info(__name__ + ' : Set control to remote & locked')
self.set_remote_status(1)
def local(self):
"""
Set control to local & locked
"""
logging.info(__name__ + ' : Set control to local & locked')
self.set_remote_status(0)
def set_remote_status(self, mode):
"""
Set remote control status.
Args:
mode(int) :
0 : "Local and locked",
1 : "Remote and locked",
2 : "Local and unlocked",
3 : "Remote and unlocked",
Returns:
None
"""
status = {
0: "Local and locked",
1: "Remote and locked",
2: "Local and unlocked",
3: "Remote and unlocked",
}
logging.info(__name__ + ' : Setting remote control status to %s' %
status.get(mode, "Unknown"))
self._execute('C%s' % mode)
# Functions: Control commands (only recognised when in REMOTE control)
def set_to_slow(self):
"""
Set helium meter channel 1 to slow mode.
"""
self.set_remote_status(1)
logging.info(__name__ + ' : Setting Helium Probe in SLOW rate')
self._execute('S1')
self.set_remote_status(3)
def set_to_fast(self):
"""
Set helium meter channel 1 to fast mode.
"""
self.set_remote_status(1)
logging.info(__name__ + ' : Setting Helium Probe in FAST rate')
self._execute('T1')
self.set_remote_status(3)
def _do_set_rate(self, rate):
"""
Set helium meter channel 1 probe rate
Args:
rate(int) :
0 : "SLOW"
1 : "FAST"
"""
self.set_remote_status(1)
if rate == 0:
self.set_to_slow()
elif rate == 1:
self.set_to_fast()
self.set_remote_status(3)
logging.info(self._do_get_rate())
| [
"logging.info",
"logging.debug",
"time.sleep"
]
| [((1353, 1407), 'logging.debug', 'logging.debug', (["(__name__ + ' : Initializing instrument')"], {}), "(__name__ + ' : Initializing instrument')\n", (1366, 1407), False, 'import logging\n'), ((2719, 2807), 'logging.info', 'logging.info', (["(__name__ + ' : Send the following command to the device: %s' % message)"], {}), "(__name__ + ' : Send the following command to the device: %s' %\n message)\n", (2731, 2807), False, 'import logging\n'), ((2891, 2902), 'time.sleep', 'sleep', (['(0.07)'], {}), '(0.07)\n', (2896, 2902), False, 'from time import sleep\n'), ((5189, 5255), 'logging.info', 'logging.info', (["(__name__ + ' : reading all settings from instrument')"], {}), "(__name__ + ' : reading all settings from instrument')\n", (5201, 5255), False, 'import logging\n'), ((5417, 5472), 'logging.info', 'logging.info', (["(__name__ + ' : Closing ILM200 connection')"], {}), "(__name__ + ' : Closing ILM200 connection')\n", (5429, 5472), False, 'import logging\n'), ((5775, 5824), 'logging.info', 'logging.info', (["(__name__ + ' : Identify the device')"], {}), "(__name__ + ' : Identify the device')\n", (5787, 5824), False, 'import logging\n'), ((6052, 6105), 'logging.info', 'logging.info', (["(__name__ + ' : Read level of channel 1')"], {}), "(__name__ + ' : Read level of channel 1')\n", (6064, 6105), False, 'import logging\n'), ((6291, 6346), 'logging.info', 'logging.info', (["(__name__ + ' : Get status of the device.')"], {}), "(__name__ + ' : Get status of the device.')\n", (6303, 6346), False, 'import logging\n'), ((7710, 7770), 'logging.info', 'logging.info', (["(__name__ + ' : Set control to remote & locked')"], {}), "(__name__ + ' : Set control to remote & locked')\n", (7722, 7770), False, 'import logging\n'), ((7897, 7956), 'logging.info', 'logging.info', (["(__name__ + ' : Set control to local & locked')"], {}), "(__name__ + ' : Set control to local & locked')\n", (7909, 7956), False, 'import logging\n'), ((8868, 8931), 'logging.info', 'logging.info', (["(__name__ + ' : Setting Helium Probe in SLOW rate')"], {}), "(__name__ + ' : Setting Helium Probe in SLOW rate')\n", (8880, 8931), False, 'import logging\n'), ((9137, 9200), 'logging.info', 'logging.info', (["(__name__ + ' : Setting Helium Probe in FAST rate')"], {}), "(__name__ + ' : Setting Helium Probe in FAST rate')\n", (9149, 9200), False, 'import logging\n'), ((2220, 2231), 'time.sleep', 'sleep', (['(0.07)'], {}), '(0.07)\n', (2225, 2231), False, 'from time import sleep\n'), ((2370, 2387), 'logging.debug', 'logging.debug', (['ex'], {}), '(ex)\n', (2383, 2387), False, 'import logging\n'), ((4895, 4912), 'logging.debug', 'logging.debug', (['ex'], {}), '(ex)\n', (4908, 4912), False, 'import logging\n')] |
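A hypothetical QCoDeS session with the ILM200 driver above, following the ISOBUS address example given in the class docstring:

ilm = OxfordInstruments_ILM200('ilm', 'ASRL4::INSTR', number=1)
print(ilm.level())   # helium level in percent
print(ilm.rate())    # probe rate: SLOW or FAST
ilm.close()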
# This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0
# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0
#
"""A module containing a collection of plugins that produce data typically
found in Mac's lsmod command."""
from volatility3.framework import exceptions, renderers, interfaces, contexts
from volatility3.framework.configuration import requirements
from volatility3.framework.interfaces import plugins
from volatility3.framework.objects import utility
from volatility3.framework.renderers import format_hints
class Lsmod(plugins.PluginInterface):
"""Lists loaded kernel modules."""
_required_framework_version = (1, 0, 0)
_version = (1, 0, 0)
@classmethod
def get_requirements(cls):
return [
requirements.TranslationLayerRequirement(name = 'primary',
description = 'Memory layer for the kernel',
architectures = ["Intel32", "Intel64"]),
requirements.SymbolTableRequirement(name = "darwin", description = "Mac kernel")
]
@classmethod
def list_modules(cls, context: interfaces.context.ContextInterface, layer_name: str, darwin_symbols: str):
"""Lists all the modules in the primary layer.
Args:
context: The context to retrieve required elements (layers, symbol tables) from
layer_name: The name of the layer on which to operate
darwin_symbols: The name of the table containing the kernel symbols
Returns:
A list of modules from the `layer_name` layer
"""
kernel = contexts.Module(context, darwin_symbols, layer_name, 0)
kernel_layer = context.layers[layer_name]
kmod_ptr = kernel.object_from_symbol(symbol_name = "kmod")
try:
kmod = kmod_ptr.dereference().cast("kmod_info")
except exceptions.InvalidAddressException:
return []
yield kmod
try:
kmod = kmod.next
except exceptions.InvalidAddressException:
return []
seen = set()
while kmod != 0 and \
kmod not in seen and \
len(seen) < 1024:
kmod_obj = kmod.dereference()
if not kernel_layer.is_valid(kmod_obj.vol.offset, kmod_obj.vol.size):
break
seen.add(kmod)
yield kmod
try:
kmod = kmod.next
except exceptions.InvalidAddressException:
return
def _generator(self):
for module in self.list_modules(self.context, self.config['primary'], self.config['darwin']):
mod_name = utility.array_to_string(module.name)
mod_size = module.size
yield 0, (format_hints.Hex(module.vol.offset), mod_name, mod_size)
def run(self):
return renderers.TreeGrid([("Offset", format_hints.Hex), ("Name", str), ("Size", int)], self._generator())
| [
"volatility3.framework.renderers.format_hints.Hex",
"volatility3.framework.configuration.requirements.TranslationLayerRequirement",
"volatility3.framework.objects.utility.array_to_string",
"volatility3.framework.configuration.requirements.SymbolTableRequirement",
"volatility3.framework.contexts.Module"
]
| [((1706, 1761), 'volatility3.framework.contexts.Module', 'contexts.Module', (['context', 'darwin_symbols', 'layer_name', '(0)'], {}), '(context, darwin_symbols, layer_name, 0)\n', (1721, 1761), False, 'from volatility3.framework import renderers, interfaces, contexts\n'), ((810, 952), 'volatility3.framework.configuration.requirements.TranslationLayerRequirement', 'requirements.TranslationLayerRequirement', ([], {'name': '"""primary"""', 'description': '"""Memory layer for the kernel"""', 'architectures': "['Intel32', 'Intel64']"}), "(name='primary', description=\n 'Memory layer for the kernel', architectures=['Intel32', 'Intel64'])\n", (850, 952), False, 'from volatility3.framework.configuration import requirements\n'), ((1073, 1149), 'volatility3.framework.configuration.requirements.SymbolTableRequirement', 'requirements.SymbolTableRequirement', ([], {'name': '"""darwin"""', 'description': '"""Mac kernel"""'}), "(name='darwin', description='Mac kernel')\n", (1108, 1149), False, 'from volatility3.framework.configuration import requirements\n'), ((2767, 2803), 'volatility3.framework.objects.utility.array_to_string', 'utility.array_to_string', (['module.name'], {}), '(module.name)\n', (2790, 2803), False, 'from volatility3.framework.objects import utility\n'), ((2862, 2897), 'volatility3.framework.renderers.format_hints.Hex', 'format_hints.Hex', (['module.vol.offset'], {}), '(module.vol.offset)\n', (2878, 2897), False, 'from volatility3.framework.renderers import format_hints\n')] |
"""Subdivided icosahedral mesh generation"""
from __future__ import print_function
import numpy as np
# following: http://blog.andreaskahler.com/2009/06/creating-icosphere-mesh-in-code.html
# hierarchy:
# Icosphere -> Triangle -> Point
class IcoSphere:
"""
Usage: IcoSphere(level)
Maximum supported level = 8
get started with:
>>> A = IcoSphere(3)
... A.plot3d()
"""
# maximum level for subdivision of the icosahedron
maxlevel = 8
def __init__(self, level):
if type(level) is not int:
raise TypeError('level must be an integer')
elif level < 0:
raise Exception('level must be no less than 0')
elif level > self.maxlevel:
raise Exception('level larger than ' + str(self.maxlevel) + ' not supported')
self.level = level
self.points = []
self.triangles = []
self.npts = 0
################################
# initialise level 1 icosahedron
################################
        # golden ratio
t = (1.0 + np.sqrt(5.0)) / 2.0
# add vertices
self._addPoint(np.array([-1, t, 0]))
self._addPoint(np.array([ 1, t, 0]))
self._addPoint(np.array([-1,-t, 0]))
self._addPoint(np.array([ 1,-t, 0]))
self._addPoint(np.array([ 0,-1, t]))
self._addPoint(np.array([ 0, 1, t]))
self._addPoint(np.array([ 0,-1,-t]))
self._addPoint(np.array([ 0, 1,-t]))
self._addPoint(np.array([ t, 0,-1]))
self._addPoint(np.array([ t, 0, 1]))
self._addPoint(np.array([-t, 0,-1]))
self._addPoint(np.array([-t, 0, 1]))
# make triangles
tris = self.triangles
verts = self.points
# 5 faces around point 0
tris.append(Triangle([ verts[0],verts[11], verts[5]]))
tris.append(Triangle([ verts[0], verts[5], verts[1]]))
tris.append(Triangle([ verts[0], verts[1], verts[7]]))
tris.append(Triangle([ verts[0], verts[7],verts[10]]))
tris.append(Triangle([ verts[0],verts[10],verts[11]]))
# 5 adjacent faces
tris.append(Triangle([ verts[1], verts[5], verts[9]]))
tris.append(Triangle([ verts[5],verts[11], verts[4]]))
tris.append(Triangle([verts[11],verts[10], verts[2]]))
tris.append(Triangle([verts[10], verts[7], verts[6]]))
tris.append(Triangle([ verts[7], verts[1], verts[8]]))
# 5 faces around point 3
tris.append(Triangle([ verts[3], verts[9], verts[4]]))
tris.append(Triangle([ verts[3], verts[4], verts[2]]))
tris.append(Triangle([ verts[3], verts[2], verts[6]]))
tris.append(Triangle([ verts[3], verts[6], verts[8]]))
tris.append(Triangle([ verts[3], verts[8], verts[9]]))
# 5 adjacent faces
tris.append(Triangle([ verts[4], verts[9], verts[5]]))
tris.append(Triangle([ verts[2], verts[4],verts[11]]))
tris.append(Triangle([ verts[6], verts[2],verts[10]]))
tris.append(Triangle([ verts[8], verts[6], verts[7]]))
tris.append(Triangle([ verts[9], verts[8], verts[1]]))
########################################
# refine triangles to desired mesh level
########################################
for l in range(self.level):
midPointDict = {}
faces = []
for tri in self.triangles:
# replace triangle by 4 triangles
p = tri.pts
a = self._getMiddlePoint(p[0], p[1], midPointDict)
b = self._getMiddlePoint(p[1], p[2], midPointDict)
c = self._getMiddlePoint(p[2], p[0], midPointDict)
faces.append(Triangle([p[0], a, c]))
faces.append(Triangle([p[1], b, a]))
faces.append(Triangle([p[2], c, b]))
faces.append(Triangle([a, b, c]))
# once looped thru all triangles overwrite self.triangles
self.triangles = faces
self.nfaces = len(self.triangles)
# check that npts and nfaces are as expected
expected_npts = calculate_npts(self.level)
expected_nfaces = calculate_nfaces(self.level)
if self.npts != calculate_npts(self.level):
raise Exception('npts '+str(self.npts)+' not as expected '+str(expected_npts))
elif self.nfaces != calculate_nfaces(self.level):
raise Exception('nfaces '+str(self.nfaces)+' not as expected '+str(expected_nfaces))
def _addPoint(self, xyz):
"""Add point to self.points"""
self.points.append(Point(self.npts, xyz))
self.npts += 1
def _getMiddlePoint(self, p1, p2, midPointDict):
"""return Point"""
if not isinstance(p1, Point) or not isinstance(p2, Point):
raise TypeError('p1 and p2 must be Points')
# does point already exist?
key = tuple(sorted([p1.idx, p2.idx]))
if key in midPointDict:
# point exists
pass
else:
# point is new
self._addPoint((p1.xyz + p2.xyz)/2)
midPointDict[key] = self.points[-1]
return midPointDict[key]
def plot3d(self):
"""Matplotlib 3D plot of mesh"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xyz = np.asarray([ pt.xyz for pt in self.points ])
x = xyz[:,0]
y = xyz[:,1]
z = xyz[:,2]
ts = np.asarray([ [ p.idx for p in t.pts ] for t in self.triangles ])
ax.plot_trisurf(x,y,ts,z)
plt.show()
def dump_xyz(self):
[ print(*pt.xyz) for pt in self.points ]
def dump_latlonr(self):
[ print(*cart2geo(*pt.xyz)) for pt in self.points ]
class Triangle:
"""A triangle adjoining three adjacent points"""
def __init__(self, pts):
if not isinstance(pts, list):
raise TypeError('pts must be a list')
elif len(pts) !=3:
raise Exception('pts must be of length 3')
else:
self.pts = pts
class Point:
"""A 3D point on the mesh"""
def __init__(self, idx, xyz):
if type(idx) is not int:
raise TypeError('idx must be an integer')
elif not isinstance(xyz,np.ndarray):
raise TypeError('xyz must be a numpy array')
elif xyz.size != 3:
raise Exception('xyz must be of size 3')
else:
# ensure length equals 1 and add to list of points
self.xyz = (xyz/np.linalg.norm(xyz))
self.idx = idx
def calculate_npts(level):
n = 2**level
return 2 + 10 * n**2
def calculate_nfaces(level):
n = 2**level
return 20 * n**2
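# Added illustration (not part of the original module): each subdivision pass replaces every
# triangle with 4 smaller ones, so a level-L icosphere has 20 * 4**L faces and, by Euler's
# formula for a triangulated sphere, 2 + 10 * 4**L vertices.  For example:
#   calculate_npts(0) == 12,  calculate_nfaces(0) == 20    # plain icosahedron
#   calculate_npts(1) == 42,  calculate_nfaces(1) == 80
#   calculate_npts(3) == 642, calculate_nfaces(3) == 1280  # the level used in the class docstring example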
def cart2geo(x, y, z):
"""convert x y z cartesian coordinates to latitude longitude radius
xyz is a numpy array, a right handed co-ordinate system is assumed with
-- x-axis going through the equator at 0 degrees longitude
-- y-axis going through the equator at 90 degrees longitude
-- z-axis going through the north pole."""
r = np.sqrt(x**2 + y**2 + z**2)
lon = np.rad2deg(np.arctan2(y,x))
lat = np.rad2deg(np.arcsin(z/r))
return lat, lon, r
def geo2cart(lat, lon, r):
"""convert latitude longitude radius to x y z cartesian coordinates
xyz is a numpy array, a right handed co-ordinate system is assumed with
-- x-axis going through the equator at 0 degrees longitude
-- y-axis going through the equator at 90 degrees longitude
-- z-axis going through the north pole."""
x = r * np.cos(lon) * np.cos(lat)
y = r * np.sin(lon) * np.cos(lat)
z = r * np.sin(lat)
return x, y, z
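# Added usage sketch (not in the original file): cart2geo() returns lat/lon in degrees
# (np.rad2deg) whereas geo2cart() feeds lat/lon straight into np.sin/np.cos and therefore
# expects radians, so a round trip needs an explicit conversion, e.g.:
#   lat, lon, r = cart2geo(0.0, 0.0, 1.0)                    # -> (90.0, 0.0, 1.0), the north pole
#   x, y, z = geo2cart(np.deg2rad(lat), np.deg2rad(lon), r)  # -> (~0.0, 0.0, 1.0)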
# def xyzToLatLonR(xyz):
# trans = np.array([np.])
| [
"numpy.sqrt",
"numpy.asarray",
"numpy.arcsin",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.arctan2",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"matplotlib.pyplot.show"
]
| [((7381, 7414), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (7388, 7414), True, 'import numpy as np\n'), ((5460, 5472), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5470, 5472), True, 'import matplotlib.pyplot as plt\n'), ((5538, 5580), 'numpy.asarray', 'np.asarray', (['[pt.xyz for pt in self.points]'], {}), '([pt.xyz for pt in self.points])\n', (5548, 5580), True, 'import numpy as np\n'), ((5659, 5719), 'numpy.asarray', 'np.asarray', (['[[p.idx for p in t.pts] for t in self.triangles]'], {}), '([[p.idx for p in t.pts] for t in self.triangles])\n', (5669, 5719), True, 'import numpy as np\n'), ((5766, 5776), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5774, 5776), True, 'import matplotlib.pyplot as plt\n'), ((7430, 7446), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (7440, 7446), True, 'import numpy as np\n'), ((7468, 7484), 'numpy.arcsin', 'np.arcsin', (['(z / r)'], {}), '(z / r)\n', (7477, 7484), True, 'import numpy as np\n'), ((7901, 7912), 'numpy.cos', 'np.cos', (['lat'], {}), '(lat)\n', (7907, 7912), True, 'import numpy as np\n'), ((7939, 7950), 'numpy.cos', 'np.cos', (['lat'], {}), '(lat)\n', (7945, 7950), True, 'import numpy as np\n'), ((7963, 7974), 'numpy.sin', 'np.sin', (['lat'], {}), '(lat)\n', (7969, 7974), True, 'import numpy as np\n'), ((1194, 1214), 'numpy.array', 'np.array', (['[-1, t, 0]'], {}), '([-1, t, 0])\n', (1202, 1214), True, 'import numpy as np\n'), ((1239, 1258), 'numpy.array', 'np.array', (['[1, t, 0]'], {}), '([1, t, 0])\n', (1247, 1258), True, 'import numpy as np\n'), ((1284, 1305), 'numpy.array', 'np.array', (['[-1, -t, 0]'], {}), '([-1, -t, 0])\n', (1292, 1305), True, 'import numpy as np\n'), ((1329, 1349), 'numpy.array', 'np.array', (['[1, -t, 0]'], {}), '([1, -t, 0])\n', (1337, 1349), True, 'import numpy as np\n'), ((1374, 1394), 'numpy.array', 'np.array', (['[0, -1, t]'], {}), '([0, -1, t])\n', (1382, 1394), True, 'import numpy as np\n'), ((1419, 1438), 'numpy.array', 'np.array', (['[0, 1, t]'], {}), '([0, 1, t])\n', (1427, 1438), True, 'import numpy as np\n'), ((1464, 1485), 'numpy.array', 'np.array', (['[0, -1, -t]'], {}), '([0, -1, -t])\n', (1472, 1485), True, 'import numpy as np\n'), ((1509, 1529), 'numpy.array', 'np.array', (['[0, 1, -t]'], {}), '([0, 1, -t])\n', (1517, 1529), True, 'import numpy as np\n'), ((1554, 1574), 'numpy.array', 'np.array', (['[t, 0, -1]'], {}), '([t, 0, -1])\n', (1562, 1574), True, 'import numpy as np\n'), ((1599, 1618), 'numpy.array', 'np.array', (['[t, 0, 1]'], {}), '([t, 0, 1])\n', (1607, 1618), True, 'import numpy as np\n'), ((1644, 1665), 'numpy.array', 'np.array', (['[-t, 0, -1]'], {}), '([-t, 0, -1])\n', (1652, 1665), True, 'import numpy as np\n'), ((1689, 1709), 'numpy.array', 'np.array', (['[-t, 0, 1]'], {}), '([-t, 0, 1])\n', (1697, 1709), True, 'import numpy as np\n'), ((7887, 7898), 'numpy.cos', 'np.cos', (['lon'], {}), '(lon)\n', (7893, 7898), True, 'import numpy as np\n'), ((7925, 7936), 'numpy.sin', 'np.sin', (['lon'], {}), '(lon)\n', (7931, 7936), True, 'import numpy as np\n'), ((1119, 1131), 'numpy.sqrt', 'np.sqrt', (['(5.0)'], {}), '(5.0)\n', (1126, 1131), True, 'import numpy as np\n'), ((6798, 6817), 'numpy.linalg.norm', 'np.linalg.norm', (['xyz'], {}), '(xyz)\n', (6812, 6817), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fnmatch
import os
def build_soft_links(project_path, jerry_path):
""" Creates soft links into the @project_path. """
if not os.path.exists(project_path):
os.makedirs(project_path)
links = [
{ # arc
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'arc'),
'link_name': 'arc'
},
{ # include
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'include'),
'link_name': 'include'
},
{ # quark
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'quark'),
'link_name': 'quark'
},
{ # quark/jerryscript
'src': jerry_path,
'link_name': os.path.join('quark', 'jerryscript')
}
]
for link in links:
src = os.path.join(jerry_path, link['src'])
link_name = os.path.join(project_path, link['link_name'])
if not os.path.islink(link_name):
os.symlink(src, link_name)
print("Created symlink '{link_name}' -> '{src}'".format(src=src, link_name=link_name))
def find_sources(root_dir, sub_dir):
"""
Find .c and .S files inside the @root_dir/@sub_dir directory.
Note: the returned paths will be relative to the @root_dir directory.
"""
src_dir = os.path.join(root_dir, sub_dir)
matches = []
for root, dirnames, filenames in os.walk(src_dir):
for filename in fnmatch.filter(filenames, '*.[c|S]'):
file_path = os.path.join(root, filename)
relative_path = os.path.relpath(file_path, root_dir)
matches.append(relative_path)
return matches
def build_jerry_data(jerry_path):
"""
Build up a dictionary which contains the following items:
- sources: list of JerryScript sources which should be built.
- dirs: list of JerryScript dirs used.
- cflags: CFLAGS for the build.
"""
jerry_sources = []
jerry_dirs = set()
for sub_dir in ['jerry-core', 'jerry-math', os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'source')]:
for file in find_sources(os.path.normpath(jerry_path), sub_dir):
path = os.path.join('jerryscript', file)
jerry_sources.append(path)
jerry_dirs.add(os.path.split(path)[0])
jerry_cflags = [
'-DJERRY_GLOBAL_HEAP_SIZE=10',
'-DJERRY_NDEBUG',
'-DJERRY_DISABLE_HEAVY_DEBUG',
'-DJERRY_BUILTIN_NUMBER=0',
'-DJERRY_BUILTIN_STRING=0',
'-DJERRY_BUILTIN_BOOLEAN=0',
#'-DJERRY_BUILTIN_ERRORS=0',
'-DJERRY_BUILTIN_ARRAY=0',
'-DJERRY_BUILTIN_MATH=0',
'-DJERRY_BUILTIN_JSON=0',
'-DJERRY_BUILTIN_DATE=0',
'-DJERRY_BUILTIN_REGEXP=0',
'-DJERRY_BUILTIN_ANNEXB=0',
'-DJERRY_ESNEXT=0',
'-DJERRY_LCACHE=0',
'-DJERRY_PROPERTY_HASHMAP=0',
]
return {
'sources': jerry_sources,
'dirs': jerry_dirs,
'cflags': jerry_cflags,
}
def write_file(path, content):
""" Writes @content into the file at specified by the @path. """
norm_path = os.path.normpath(path)
with open(norm_path, "w+") as f:
f.write(content)
print("Wrote file '{0}'".format(norm_path))
def build_obj_y(source_list):
"""
Build obj-y additions from the @source_list.
Note: the input sources should have their file extensions.
"""
return '\n'.join(['obj-y += {0}.o'.format(os.path.splitext(fname)[0]) for fname in source_list])
def build_cflags_y(cflags_list):
"""
Build cflags-y additions from the @cflags_list.
"""
return '\n'.join(['cflags-y += {0}'.format(cflag) for cflag in cflags_list])
def build_mkdir(dir_list):
""" Build mkdir calls for each dir in the @dir_list. """
return '\n'.join(['\t$(AT)mkdir -p {0}'.format(os.path.join('$(OUT_SRC)', path)) for path in dir_list])
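# Added illustration (not part of the original script), assuming a POSIX host: with
# sources ['main.c', 'sub/a.S'] and cflags ['-DJERRY_NDEBUG'] the helpers above emit
#   build_obj_y(['main.c', 'sub/a.S'])  -> "obj-y += main.o\nobj-y += sub/a.o"
#   build_cflags_y(['-DJERRY_NDEBUG'])  -> "cflags-y += -DJERRY_NDEBUG"
#   build_mkdir(['sub'])                -> "\t$(AT)mkdir -p $(OUT_SRC)/sub"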
def create_root_kbuild(project_path):
""" Creates @project_path/Kbuild.mk file. """
root_kbuild_path = os.path.join(project_path, 'Kbuild.mk')
root_kbuild_content = '''
obj-$(CONFIG_QUARK_SE_ARC) += arc/
obj-$(CONFIG_QUARK_SE_QUARK) += quark/
'''
write_file(root_kbuild_path, root_kbuild_content)
def create_root_makefile(project_path):
""" Creates @project_path/Makefile file. """
root_makefile_path = os.path.join(project_path, 'Makefile')
root_makefile_content = '''
THIS_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
T := $(abspath $(THIS_DIR)/../..)
PROJECT := {project_name}
BOARD := curie_101
ifeq ($(filter curie_101, $(BOARD)),)
$(error The curie jerry sample application can only run on the curie_101 Board)
endif
BUILDVARIANT ?= debug
quark_DEFCONFIG = $(PROJECT_PATH)/quark/defconfig
arc_DEFCONFIG = $(PROJECT_PATH)/arc/defconfig
# Optional: set the default version
VERSION_MAJOR := 1
VERSION_MINOR := 0
VERSION_PATCH := 0
include $(T)/build/project.mk
'''.format(project_name=project_name)
write_file(root_makefile_path, root_makefile_content)
def create_arc_kbuild(project_path):
""" Creates @project_path/arc/Kbuild.mk file. """
arc_path = os.path.join(project_path, 'arc')
arc_kbuild_path = os.path.join(arc_path, 'Kbuild.mk')
arc_sources = find_sources(arc_path, '.')
arc_kbuild_content = build_obj_y(arc_sources)
write_file(arc_kbuild_path, arc_kbuild_content)
def create_quark_kbuild(project_path, jerry_path):
""" Creates @project_path/quark/Kbuild.mk file. """
quark_kbuild_path = os.path.join(project_path, 'quark', 'Kbuild.mk')
# Extract a few JerryScript related data
jerry_data = build_jerry_data(jerry_path)
jerry_objects = build_obj_y(jerry_data['sources'])
jerry_defines = jerry_data['cflags']
jerry_build_dirs = build_mkdir(jerry_data['dirs'])
quark_include_paths = [
'include',
'jerryscript',
os.path.join('jerryscript', 'jerry-math', 'include'),
os.path.join('jerryscript', 'targets', 'baremetal-sdk', 'curie-bsp', 'include')
] + list(jerry_data['dirs'])
quark_includes = [
'-Wno-error',
] + ['-I%s' % os.path.join(project_path, 'quark', path) for path in quark_include_paths]
quark_cflags = build_cflags_y(jerry_defines + quark_includes)
quark_kbuild_content = '''
{cflags}
obj-y += main.o
{objects}
build_dirs:
{dirs}
$(OUT_SRC): build_dirs
'''.format(objects=jerry_objects, cflags=quark_cflags, dirs=jerry_build_dirs)
write_file(quark_kbuild_path, quark_kbuild_content)
def main(curie_path, project_name, jerry_path):
project_path = os.path.join(curie_path, 'wearable_device_sw', 'projects', project_name)
build_soft_links(project_path, jerry_path)
create_root_kbuild(project_path)
create_root_makefile(project_path)
create_arc_kbuild(project_path)
create_quark_kbuild(project_path, jerry_path)
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('Usage:')
print('{script_name} [full or relative path of Curie_BSP]'.format(script_name=sys.argv[0]))
sys.exit(1)
project_name = 'curie_bsp_jerry'
file_dir = os.path.dirname(os.path.abspath(__file__))
jerry_path = os.path.join(file_dir, "..", "..", "..")
curie_path = os.path.join(os.getcwd(), sys.argv[1])
main(curie_path, project_name, jerry_path)
| [
"os.path.exists",
"os.makedirs",
"os.path.join",
"os.symlink",
"os.path.splitext",
"os.getcwd",
"os.path.normpath",
"os.path.split",
"sys.exit",
"fnmatch.filter",
"os.path.islink",
"os.path.abspath",
"os.walk",
"os.path.relpath"
]
| [((2005, 2036), 'os.path.join', 'os.path.join', (['root_dir', 'sub_dir'], {}), '(root_dir, sub_dir)\n', (2017, 2036), False, 'import os\n'), ((2092, 2108), 'os.walk', 'os.walk', (['src_dir'], {}), '(src_dir)\n', (2099, 2108), False, 'import os\n'), ((3803, 3825), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (3819, 3825), False, 'import os\n'), ((4756, 4795), 'os.path.join', 'os.path.join', (['project_path', '"""Kbuild.mk"""'], {}), "(project_path, 'Kbuild.mk')\n", (4768, 4795), False, 'import os\n'), ((5075, 5113), 'os.path.join', 'os.path.join', (['project_path', '"""Makefile"""'], {}), "(project_path, 'Makefile')\n", (5087, 5113), False, 'import os\n'), ((5890, 5923), 'os.path.join', 'os.path.join', (['project_path', '"""arc"""'], {}), "(project_path, 'arc')\n", (5902, 5923), False, 'import os\n'), ((5946, 5981), 'os.path.join', 'os.path.join', (['arc_path', '"""Kbuild.mk"""'], {}), "(arc_path, 'Kbuild.mk')\n", (5958, 5981), False, 'import os\n'), ((6264, 6312), 'os.path.join', 'os.path.join', (['project_path', '"""quark"""', '"""Kbuild.mk"""'], {}), "(project_path, 'quark', 'Kbuild.mk')\n", (6276, 6312), False, 'import os\n'), ((7332, 7404), 'os.path.join', 'os.path.join', (['curie_path', '"""wearable_device_sw"""', '"""projects"""', 'project_name'], {}), "(curie_path, 'wearable_device_sw', 'projects', project_name)\n", (7344, 7404), False, 'import os\n'), ((7946, 7986), 'os.path.join', 'os.path.join', (['file_dir', '""".."""', '""".."""', '""".."""'], {}), "(file_dir, '..', '..', '..')\n", (7958, 7986), False, 'import os\n'), ((782, 810), 'os.path.exists', 'os.path.exists', (['project_path'], {}), '(project_path)\n', (796, 810), False, 'import os\n'), ((820, 845), 'os.makedirs', 'os.makedirs', (['project_path'], {}), '(project_path)\n', (831, 845), False, 'import os\n'), ((1512, 1549), 'os.path.join', 'os.path.join', (['jerry_path', "link['src']"], {}), "(jerry_path, link['src'])\n", (1524, 1549), False, 'import os\n'), ((1570, 1615), 'os.path.join', 'os.path.join', (['project_path', "link['link_name']"], {}), "(project_path, link['link_name'])\n", (1582, 1615), False, 'import os\n'), ((2134, 2170), 'fnmatch.filter', 'fnmatch.filter', (['filenames', '"""*.[c|S]"""'], {}), "(filenames, '*.[c|S]')\n", (2148, 2170), False, 'import fnmatch\n'), ((2708, 2771), 'os.path.join', 'os.path.join', (['"""targets"""', '"""baremetal-sdk"""', '"""curie-bsp"""', '"""source"""'], {}), "('targets', 'baremetal-sdk', 'curie-bsp', 'source')\n", (2720, 2771), False, 'import os\n'), ((7820, 7831), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7828, 7831), False, 'import sys\n'), ((7902, 7927), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (7917, 7927), False, 'import os\n'), ((8017, 8028), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8026, 8028), False, 'import os\n'), ((896, 969), 'os.path.join', 'os.path.join', (['"""targets"""', '"""baremetal-sdk"""', '"""curie-bsp"""', '"""jerry_app"""', '"""arc"""'], {}), "('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'arc')\n", (908, 969), False, 'import os\n'), ((1052, 1129), 'os.path.join', 'os.path.join', (['"""targets"""', '"""baremetal-sdk"""', '"""curie-bsp"""', '"""jerry_app"""', '"""include"""'], {}), "('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'include')\n", (1064, 1129), False, 'import os\n'), ((1214, 1289), 'os.path.join', 'os.path.join', (['"""targets"""', '"""baremetal-sdk"""', '"""curie-bsp"""', '"""jerry_app"""', '"""quark"""'], {}), "('targets', 'baremetal-sdk', 'curie-bsp', 
'jerry_app', 'quark')\n", (1226, 1289), False, 'import os\n'), ((1421, 1457), 'os.path.join', 'os.path.join', (['"""quark"""', '"""jerryscript"""'], {}), "('quark', 'jerryscript')\n", (1433, 1457), False, 'import os\n'), ((1631, 1656), 'os.path.islink', 'os.path.islink', (['link_name'], {}), '(link_name)\n', (1645, 1656), False, 'import os\n'), ((1670, 1696), 'os.symlink', 'os.symlink', (['src', 'link_name'], {}), '(src, link_name)\n', (1680, 1696), False, 'import os\n'), ((2196, 2224), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (2208, 2224), False, 'import os\n'), ((2253, 2289), 'os.path.relpath', 'os.path.relpath', (['file_path', 'root_dir'], {}), '(file_path, root_dir)\n', (2268, 2289), False, 'import os\n'), ((2807, 2835), 'os.path.normpath', 'os.path.normpath', (['jerry_path'], {}), '(jerry_path)\n', (2823, 2835), False, 'import os\n'), ((2866, 2899), 'os.path.join', 'os.path.join', (['"""jerryscript"""', 'file'], {}), "('jerryscript', file)\n", (2878, 2899), False, 'import os\n'), ((6635, 6687), 'os.path.join', 'os.path.join', (['"""jerryscript"""', '"""jerry-math"""', '"""include"""'], {}), "('jerryscript', 'jerry-math', 'include')\n", (6647, 6687), False, 'import os\n'), ((6697, 6776), 'os.path.join', 'os.path.join', (['"""jerryscript"""', '"""targets"""', '"""baremetal-sdk"""', '"""curie-bsp"""', '"""include"""'], {}), "('jerryscript', 'targets', 'baremetal-sdk', 'curie-bsp', 'include')\n", (6709, 6776), False, 'import os\n'), ((4585, 4617), 'os.path.join', 'os.path.join', (['"""$(OUT_SRC)"""', 'path'], {}), "('$(OUT_SRC)', path)\n", (4597, 4617), False, 'import os\n'), ((6874, 6915), 'os.path.join', 'os.path.join', (['project_path', '"""quark"""', 'path'], {}), "(project_path, 'quark', path)\n", (6886, 6915), False, 'import os\n'), ((2966, 2985), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (2979, 2985), False, 'import os\n'), ((4142, 4165), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (4158, 4165), False, 'import os\n')] |
from math import sqrt
import emoji
num = int(input("Digite um número: "))
raiz = sqrt(num)
print("A raiz do número {0} é {1:.2f}.".format(num, raiz))
print(emoji.emojize("Hello World! :earth_americas:", use_aliases=True))
| [
"emoji.emojize",
"math.sqrt"
]
| [((81, 90), 'math.sqrt', 'sqrt', (['num'], {}), '(num)\n', (85, 90), False, 'from math import sqrt\n'), ((156, 220), 'emoji.emojize', 'emoji.emojize', (['"""Hello World! :earth_americas:"""'], {'use_aliases': '(True)'}), "('Hello World! :earth_americas:', use_aliases=True)\n", (169, 220), False, 'import emoji\n')] |
from __future__ import print_function
import argparse
import os
import time, platform
import cv2
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from datasets import DATASET_NAMES, BipedDataset, TestDataset, dataset_info
from losses import *
from model import DexiNed
# from model0C import DexiNed
from utils import (image_normalization, save_image_batch_to_disk,
visualize_result)
IS_LINUX = True if platform.system()=="Linux" else False
def train_one_epoch(epoch, dataloader, model, criterion, optimizer, device,
log_interval_vis, tb_writer, args=None):
imgs_res_folder = os.path.join(args.output_dir, 'current_res')
os.makedirs(imgs_res_folder,exist_ok=True)
# Put model in training mode
model.train()
# l_weight = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.1] # for bdcn ori loss
# before [0.6,0.6,1.1,1.1,0.4,0.4,1.3] [0.4,0.4,1.1,1.1,0.6,0.6,1.3],[0.4,0.4,1.1,1.1,0.8,0.8,1.3]
l_weight = [0.7,0.7,1.1,1.1,0.3,0.3,1.3] # for bdcn loss theory 3 before the last 1.3 0.6-0..5
# l_weight = [[0.05, 2.], [0.05, 2.], [0.05, 2.],
# [0.1, 1.], [0.1, 1.], [0.1, 1.],
# [0.01, 4.]] # for cats loss
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device) # BxCxHxW
labels = sample_batched['labels'].to(device) # BxHxW
preds_list = model(images)
# loss = sum([criterion(preds, labels, l_w, device) for preds, l_w in zip(preds_list, l_weight)]) # cats_loss
loss = sum([criterion(preds, labels,l_w)/args.batch_size for preds, l_w in zip(preds_list,l_weight)]) # bdcn_loss
# loss = sum([criterion(preds, labels) for preds in preds_list]) #HED loss, rcf_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
if tb_writer is not None:
tb_writer.add_scalar('loss',
loss.detach(),
(len(dataloader) * epoch + batch_id))
if batch_id % 5 == 0:
print(time.ctime(), 'Epoch: {0} Sample {1}/{2} Loss: {3}'
.format(epoch, batch_id, len(dataloader), loss.item()))
if batch_id % log_interval_vis == 0:
res_data = []
img = images.cpu().numpy()
res_data.append(img[2])
ed_gt = labels.cpu().numpy()
res_data.append(ed_gt[2])
# tmp_pred = tmp_preds[2,...]
for i in range(len(preds_list)):
tmp = preds_list[i]
tmp = tmp[2]
# print(tmp.shape)
tmp = torch.sigmoid(tmp).unsqueeze(dim=0)
tmp = tmp.cpu().detach().numpy()
res_data.append(tmp)
vis_imgs = visualize_result(res_data, arg=args)
del tmp, res_data
vis_imgs = cv2.resize(vis_imgs,
(int(vis_imgs.shape[1]*0.8), int(vis_imgs.shape[0]*0.8)))
img_test = 'Epoch: {0} Sample {1}/{2} Loss: {3}' \
.format(epoch, batch_id, len(dataloader), loss.item())
BLACK = (0, 0, 255)
font = cv2.FONT_HERSHEY_SIMPLEX
font_size = 1.1
font_color = BLACK
font_thickness = 2
x, y = 30, 30
vis_imgs = cv2.putText(vis_imgs,
img_test,
(x, y),
font, font_size, font_color, font_thickness, cv2.LINE_AA)
cv2.imwrite(os.path.join(imgs_res_folder, 'results.png'), vis_imgs)
def validate_one_epoch(epoch, dataloader, model, device, output_dir, arg=None):
# XXX This is not really validation, but testing
# Put model in eval mode
model.eval()
with torch.no_grad():
for _, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
# labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
preds = model(images)
# print('pred shape', preds[0].shape)
save_image_batch_to_disk(preds[-1],
output_dir,
file_names,img_shape=image_shape,
arg=arg)
def test(checkpoint_path, dataloader, model, device, output_dir, args):
if not os.path.isfile(checkpoint_path):
raise FileNotFoundError(
f"Checkpoint filte note found: {checkpoint_path}")
print(f"Restoring weights from: {checkpoint_path}")
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
# Put model in evaluation mode
model.eval()
with torch.no_grad():
total_duration = []
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
if not args.test_data == "CLASSIC":
labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
print(f"input tensor shape: {images.shape}")
# images = images[:, [2, 1, 0], :, :]
start_time = time.time()
preds = model(images)
tmp_duration = time.time() - start_time
total_duration.append(tmp_duration)
save_image_batch_to_disk(preds,
output_dir,
file_names,
image_shape,
arg=args)
torch.cuda.empty_cache()
total_duration = np.array(total_duration)
print("******** Testing finished in", args.test_data, "dataset. *****")
print("Average time per image: %f.4" % total_duration.mean(), "seconds")
print("Time spend in the Dataset: %f.4" % total_duration.sum(), "seconds")
def testPich(checkpoint_path, dataloader, model, device, output_dir, args):
    # a test model plus the interchanged channels
if not os.path.isfile(checkpoint_path):
raise FileNotFoundError(
f"Checkpoint filte note found: {checkpoint_path}")
print(f"Restoring weights from: {checkpoint_path}")
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
# Put model in evaluation mode
model.eval()
with torch.no_grad():
total_duration = []
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
if not args.test_data == "CLASSIC":
labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
print(f"input tensor shape: {images.shape}")
start_time = time.time()
# images2 = images[:, [1, 0, 2], :, :] #GBR
images2 = images[:, [2, 1, 0], :, :] # RGB
preds = model(images)
preds2 = model(images2)
tmp_duration = time.time() - start_time
total_duration.append(tmp_duration)
save_image_batch_to_disk([preds,preds2],
output_dir,
file_names,
image_shape,
arg=args, is_inchannel=True)
torch.cuda.empty_cache()
total_duration = np.array(total_duration)
print("******** Testing finished in", args.test_data, "dataset. *****")
print("Average time per image: %f.4" % total_duration.mean(), "seconds")
print("Time spend in the Dataset: %f.4" % total_duration.sum(), "seconds")
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description='DexiNed trainer.')
parser.add_argument('--choose_test_data',
type=int,
default=3,
help='Already set the dataset for testing choice: 0 - 8')
# ----------- test -------0--
TEST_DATA = DATASET_NAMES[parser.parse_args().choose_test_data] # max 8
test_inf = dataset_info(TEST_DATA, is_linux=IS_LINUX)
test_dir = test_inf['data_dir']
is_testing = True # current test _bdcnlossNew256-sd7-1.10.4p5
# Training settings
TRAIN_DATA = DATASET_NAMES[0] # BIPED=0
train_inf = dataset_info(TRAIN_DATA, is_linux=IS_LINUX)
train_dir = train_inf['data_dir']
# Data parameters
parser.add_argument('--input_dir',
type=str,
default=train_dir,
help='the path to the directory with the input data.')
parser.add_argument('--input_val_dir',
type=str,
default=test_inf['data_dir'],
help='the path to the directory with the input data for validation.')
parser.add_argument('--output_dir',
type=str,
default='checkpoints',
help='the path to output the results.')
parser.add_argument('--train_data',
type=str,
choices=DATASET_NAMES,
default=TRAIN_DATA,
help='Name of the dataset.')
parser.add_argument('--test_data',
type=str,
choices=DATASET_NAMES,
default=TEST_DATA,
help='Name of the dataset.')
parser.add_argument('--test_list',
type=str,
default=test_inf['test_list'],
help='Dataset sample indices list.')
parser.add_argument('--train_list',
type=str,
default=train_inf['train_list'],
help='Dataset sample indices list.')
parser.add_argument('--is_testing',type=bool,
default=is_testing,
help='Script in testing mode.')
parser.add_argument('--double_img',
type=bool,
default=True,
help='True: use same 2 imgs changing channels') # Just for test
parser.add_argument('--resume',
type=bool,
default=False,
help='use previous trained data') # Just for test
parser.add_argument('--checkpoint_data',
type=str,
default='14/14_model.pth',
help='Checkpoint path from which to restore model weights from.')
parser.add_argument('--test_img_width',
type=int,
default=test_inf['img_width'],
help='Image width for testing.')
parser.add_argument('--test_img_height',
type=int,
default=test_inf['img_height'],
help='Image height for testing.')
parser.add_argument('--res_dir',
type=str,
default='result',
help='Result directory')
parser.add_argument('--log_interval_vis',
type=int,
default=50,
help='The number of batches to wait before printing test predictions.')
parser.add_argument('--epochs',
type=int,
default=22,
metavar='N',
help='Number of training epochs (default: 25).')
parser.add_argument('--lr',
default=1e-4,
type=float,
help='Initial learning rate.')
parser.add_argument('--wd',
type=float,
default=1e-4,
metavar='WD',
help='weight decay (default: 1e-4)')
# parser.add_argument('--lr_stepsize',
# default=1e4,
# type=int,
# help='Learning rate step size.')
parser.add_argument('--batch_size',
type=int,
default=8,
metavar='B',
help='the mini-batch size (default: 8)')
parser.add_argument('--workers',
default=8,
type=int,
help='The number of workers for the dataloaders.')
parser.add_argument('--tensorboard',type=bool,
default=True,
help='Use Tensorboard for logging.'),
parser.add_argument('--img_width',
type=int,
default=480,
help='Image width for training.') # BIPED 400 BSDS 352 MDBD 480
parser.add_argument('--img_height',
type=int,
default=480,
help='Image height for training.') # BIPED 400 BSDS 352
parser.add_argument('--channel_swap',
default=[2, 1, 0],
type=int)
parser.add_argument('--crop_img',
default=True,
type=bool,
help='If true crop training images, else resize images to match image width and height.')
parser.add_argument('--mean_pixel_values',
default=[103.939,116.779,123.68, 137.86],
type=float) # [103.939,116.779,123.68] [104.00699, 116.66877, 122.67892]
args = parser.parse_args()
return args
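# Added illustration; the file name "main.py" is an assumption, it is not stated in this
# snippet.  With the defaults above the script starts in testing mode (is_testing=True),
# and an empty string turns the bool flag off to run training instead:
#   python main.py --choose_test_data 3     # run DexiNed inference on the chosen test set
#   python main.py --is_testing ''          # train on BIPED with the default hyper-parameters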
def main(args):
"""Main function."""
print(f"Number of GPU's available: {torch.cuda.device_count()}")
print(f"Pytorch version: {torch.__version__}")
# Tensorboard summary writer
tb_writer = None
training_dir = os.path.join(args.output_dir,args.train_data)
os.makedirs(training_dir,exist_ok=True)
checkpoint_path = os.path.join(args.output_dir, args.train_data, args.checkpoint_data)
if args.tensorboard and not args.is_testing:
# from tensorboardX import SummaryWriter # previous torch version
from torch.utils.tensorboard import SummaryWriter # for torch 1.4 or greather
tb_writer = SummaryWriter(log_dir=training_dir)
# Get computing device
device = torch.device('cpu' if torch.cuda.device_count() == 0
else 'cuda')
# Instantiate model and move it to the computing device
model = DexiNed().to(device)
# model = nn.DataParallel(model)
ini_epoch =0
if not args.is_testing:
if args.resume:
ini_epoch=17
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
dataset_train = BipedDataset(args.input_dir,
img_width=args.img_width,
img_height=args.img_height,
mean_bgr=args.mean_pixel_values[0:3] if len(
args.mean_pixel_values) == 4 else args.mean_pixel_values,
train_mode='train',
arg=args
)
dataloader_train = DataLoader(dataset_train,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers)
dataset_val = TestDataset(args.input_val_dir,
test_data=args.test_data,
img_width=args.test_img_width,
img_height=args.test_img_height,
mean_bgr=args.mean_pixel_values[0:3] if len(
args.mean_pixel_values) == 4 else args.mean_pixel_values,
test_list=args.test_list, arg=args
)
dataloader_val = DataLoader(dataset_val,
batch_size=1,
shuffle=False,
num_workers=args.workers)
# Testing
if args.is_testing:
output_dir = os.path.join(args.res_dir, args.train_data+"2"+ args.test_data)
print(f"output_dir: {output_dir}")
if args.double_img:
# predict twice an image changing channels, then mix those results
testPich(checkpoint_path, dataloader_val, model, device, output_dir, args)
else:
test(checkpoint_path, dataloader_val, model, device, output_dir, args)
return
criterion = bdcn_loss2
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.wd)
# lr_schd = lr_scheduler.StepLR(optimizer, step_size=args.lr_stepsize,
# gamma=args.lr_gamma)
# Main training loop
seed=1021
for epoch in range(ini_epoch,args.epochs):
if epoch%7==0:
seed = seed+1000
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
print("------ Random seed applied-------------")
# Create output directories
output_dir_epoch = os.path.join(args.output_dir,args.train_data, str(epoch))
img_test_dir = os.path.join(output_dir_epoch, args.test_data + '_res')
os.makedirs(output_dir_epoch,exist_ok=True)
os.makedirs(img_test_dir,exist_ok=True)
train_one_epoch(epoch,
dataloader_train,
model,
criterion,
optimizer,
device,
args.log_interval_vis,
tb_writer,
args=args)
validate_one_epoch(epoch,
dataloader_val,
model,
device,
img_test_dir,
arg=args)
# Save model after end of every epoch
torch.save(model.module.state_dict() if hasattr(model, "module") else model.state_dict(),
os.path.join(output_dir_epoch, '{0}_model.pth'.format(epoch)))
if __name__ == '__main__':
args = parse_args()
main(args)
| [
"torch.cuda.device_count",
"model.DexiNed",
"datasets.dataset_info",
"torch.utils.tensorboard.SummaryWriter",
"time.ctime",
"argparse.ArgumentParser",
"platform.system",
"utils.visualize_result",
"cv2.putText",
"os.path.isfile",
"utils.save_image_batch_to_disk",
"time.time",
"torch.cuda.empty_cache",
"torch.manual_seed",
"os.makedirs",
"torch.load",
"torch.sigmoid",
"os.path.join",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.manual_seed"
]
| [((655, 699), 'os.path.join', 'os.path.join', (['args.output_dir', '"""current_res"""'], {}), "(args.output_dir, 'current_res')\n", (667, 699), False, 'import os\n'), ((704, 747), 'os.makedirs', 'os.makedirs', (['imgs_res_folder'], {'exist_ok': '(True)'}), '(imgs_res_folder, exist_ok=True)\n', (715, 747), False, 'import os\n'), ((8019, 8074), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""DexiNed trainer."""'}), "(description='DexiNed trainer.')\n", (8042, 8074), False, 'import argparse\n'), ((8399, 8441), 'datasets.dataset_info', 'dataset_info', (['TEST_DATA'], {'is_linux': 'IS_LINUX'}), '(TEST_DATA, is_linux=IS_LINUX)\n', (8411, 8441), False, 'from datasets import DATASET_NAMES, BipedDataset, TestDataset, dataset_info\n'), ((8629, 8672), 'datasets.dataset_info', 'dataset_info', (['TRAIN_DATA'], {'is_linux': 'IS_LINUX'}), '(TRAIN_DATA, is_linux=IS_LINUX)\n', (8641, 8672), False, 'from datasets import DATASET_NAMES, BipedDataset, TestDataset, dataset_info\n'), ((14202, 14248), 'os.path.join', 'os.path.join', (['args.output_dir', 'args.train_data'], {}), '(args.output_dir, args.train_data)\n', (14214, 14248), False, 'import os\n'), ((14252, 14292), 'os.makedirs', 'os.makedirs', (['training_dir'], {'exist_ok': '(True)'}), '(training_dir, exist_ok=True)\n', (14263, 14292), False, 'import os\n'), ((14314, 14382), 'os.path.join', 'os.path.join', (['args.output_dir', 'args.train_data', 'args.checkpoint_data'], {}), '(args.output_dir, args.train_data, args.checkpoint_data)\n', (14326, 14382), False, 'import os\n'), ((16387, 16465), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_val'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': 'args.workers'}), '(dataset_val, batch_size=1, shuffle=False, num_workers=args.workers)\n', (16397, 16465), False, 'from torch.utils.data import DataLoader\n'), ((458, 475), 'platform.system', 'platform.system', ([], {}), '()\n', (473, 475), False, 'import time, platform\n'), ((3845, 3860), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3858, 3860), False, 'import torch\n'), ((4527, 4558), 'os.path.isfile', 'os.path.isfile', (['checkpoint_path'], {}), '(checkpoint_path)\n', (4541, 4558), False, 'import os\n'), ((4738, 4786), 'torch.load', 'torch.load', (['checkpoint_path'], {'map_location': 'device'}), '(checkpoint_path, map_location=device)\n', (4748, 4786), False, 'import torch\n'), ((4888, 4903), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4901, 4903), False, 'import torch\n'), ((6242, 6273), 'os.path.isfile', 'os.path.isfile', (['checkpoint_path'], {}), '(checkpoint_path)\n', (6256, 6273), False, 'import os\n'), ((6453, 6501), 'torch.load', 'torch.load', (['checkpoint_path'], {'map_location': 'device'}), '(checkpoint_path, map_location=device)\n', (6463, 6501), False, 'import torch\n'), ((6603, 6618), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6616, 6618), False, 'import torch\n'), ((14613, 14648), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'training_dir'}), '(log_dir=training_dir)\n', (14626, 14648), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((15663, 15760), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.workers'}), '(dataset_train, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers)\n', (15673, 15760), False, 'from torch.utils.data import DataLoader\n'), ((16622, 16688), 'os.path.join', 'os.path.join', (['args.res_dir', 
"(args.train_data + '2' + args.test_data)"], {}), "(args.res_dir, args.train_data + '2' + args.test_data)\n", (16634, 16688), False, 'import os\n'), ((17787, 17842), 'os.path.join', 'os.path.join', (['output_dir_epoch', "(args.test_data + '_res')"], {}), "(output_dir_epoch, args.test_data + '_res')\n", (17799, 17842), False, 'import os\n'), ((17851, 17895), 'os.makedirs', 'os.makedirs', (['output_dir_epoch'], {'exist_ok': '(True)'}), '(output_dir_epoch, exist_ok=True)\n', (17862, 17895), False, 'import os\n'), ((17903, 17943), 'os.makedirs', 'os.makedirs', (['img_test_dir'], {'exist_ok': '(True)'}), '(img_test_dir, exist_ok=True)\n', (17914, 17943), False, 'import os\n'), ((2816, 2852), 'utils.visualize_result', 'visualize_result', (['res_data'], {'arg': 'args'}), '(res_data, arg=args)\n', (2832, 2852), False, 'from utils import image_normalization, save_image_batch_to_disk, visualize_result\n'), ((3370, 3471), 'cv2.putText', 'cv2.putText', (['vis_imgs', 'img_test', '(x, y)', 'font', 'font_size', 'font_color', 'font_thickness', 'cv2.LINE_AA'], {}), '(vis_imgs, img_test, (x, y), font, font_size, font_color,\n font_thickness, cv2.LINE_AA)\n', (3381, 3471), False, 'import cv2\n'), ((4240, 4336), 'utils.save_image_batch_to_disk', 'save_image_batch_to_disk', (['preds[-1]', 'output_dir', 'file_names'], {'img_shape': 'image_shape', 'arg': 'arg'}), '(preds[-1], output_dir, file_names, img_shape=\n image_shape, arg=arg)\n', (4264, 4336), False, 'from utils import image_normalization, save_image_batch_to_disk, visualize_result\n'), ((5404, 5415), 'time.time', 'time.time', ([], {}), '()\n', (5413, 5415), False, 'import time, platform\n'), ((5562, 5640), 'utils.save_image_batch_to_disk', 'save_image_batch_to_disk', (['preds', 'output_dir', 'file_names', 'image_shape'], {'arg': 'args'}), '(preds, output_dir, file_names, image_shape, arg=args)\n', (5586, 5640), False, 'from utils import image_normalization, save_image_batch_to_disk, visualize_result\n'), ((5801, 5825), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (5823, 5825), False, 'import torch\n'), ((7069, 7080), 'time.time', 'time.time', ([], {}), '()\n', (7078, 7080), False, 'import time, platform\n'), ((7375, 7486), 'utils.save_image_batch_to_disk', 'save_image_batch_to_disk', (['[preds, preds2]', 'output_dir', 'file_names', 'image_shape'], {'arg': 'args', 'is_inchannel': '(True)'}), '([preds, preds2], output_dir, file_names,\n image_shape, arg=args, is_inchannel=True)\n', (7399, 7486), False, 'from utils import image_normalization, save_image_batch_to_disk, visualize_result\n'), ((7642, 7666), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (7664, 7666), False, 'import torch\n'), ((14855, 14864), 'model.DexiNed', 'DexiNed', ([], {}), '()\n', (14862, 14864), False, 'from model import DexiNed\n'), ((17516, 17539), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (17533, 17539), False, 'import torch\n'), ((17552, 17580), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (17574, 17580), False, 'import torch\n'), ((2107, 2119), 'time.ctime', 'time.ctime', ([], {}), '()\n', (2117, 2119), False, 'import time, platform\n'), ((3597, 3641), 'os.path.join', 'os.path.join', (['imgs_res_folder', '"""results.png"""'], {}), "(imgs_res_folder, 'results.png')\n", (3609, 3641), False, 'import os\n'), ((5477, 5488), 'time.time', 'time.time', ([], {}), '()\n', (5486, 5488), False, 'import time, platform\n'), ((7290, 7301), 'time.time', 'time.time', ([], {}), '()\n', 
(7299, 7301), False, 'import time, platform\n'), ((14047, 14072), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (14070, 14072), False, 'import torch\n'), ((14712, 14737), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (14735, 14737), False, 'import torch\n'), ((15041, 15089), 'torch.load', 'torch.load', (['checkpoint_path'], {'map_location': 'device'}), '(checkpoint_path, map_location=device)\n', (15051, 15089), False, 'import torch\n'), ((2670, 2688), 'torch.sigmoid', 'torch.sigmoid', (['tmp'], {}), '(tmp)\n', (2683, 2688), False, 'import torch\n')] |
from __future__ import absolute_import
from django.conf.urls import patterns, url
from django_comments.feeds import LatestCommentFeed
from custom_comments import views
feeds = {
'comments': LatestCommentFeed,
}
urlpatterns = patterns('',
url(r'^post/$', views.custom_submit_comment),
url(r'^flag/(\d+)/$', views.custom_flag_comment),
url(r'^delete/(\d+)/$', views.custom_delete_comment),
url(r'^approve/(\d+)/$', views.custom_approve_comment),
url(r'^cr/(\d+)/(.+)/$', 'django.contrib.contenttypes.views.shortcut', name='comments-url-redirect'),
)
urlpatterns += patterns('',
(r'^rss/comments/$', LatestCommentFeed()),
)
| [
"django.conf.urls.url",
"django_comments.feeds.LatestCommentFeed"
]
| [((253, 296), 'django.conf.urls.url', 'url', (['"""^post/$"""', 'views.custom_submit_comment'], {}), "('^post/$', views.custom_submit_comment)\n", (256, 296), False, 'from django.conf.urls import patterns, url\n'), ((303, 351), 'django.conf.urls.url', 'url', (['"""^flag/(\\\\d+)/$"""', 'views.custom_flag_comment'], {}), "('^flag/(\\\\d+)/$', views.custom_flag_comment)\n", (306, 351), False, 'from django.conf.urls import patterns, url\n'), ((357, 409), 'django.conf.urls.url', 'url', (['"""^delete/(\\\\d+)/$"""', 'views.custom_delete_comment'], {}), "('^delete/(\\\\d+)/$', views.custom_delete_comment)\n", (360, 409), False, 'from django.conf.urls import patterns, url\n'), ((415, 469), 'django.conf.urls.url', 'url', (['"""^approve/(\\\\d+)/$"""', 'views.custom_approve_comment'], {}), "('^approve/(\\\\d+)/$', views.custom_approve_comment)\n", (418, 469), False, 'from django.conf.urls import patterns, url\n'), ((475, 580), 'django.conf.urls.url', 'url', (['"""^cr/(\\\\d+)/(.+)/$"""', '"""django.contrib.contenttypes.views.shortcut"""'], {'name': '"""comments-url-redirect"""'}), "('^cr/(\\\\d+)/(.+)/$', 'django.contrib.contenttypes.views.shortcut', name\n ='comments-url-redirect')\n", (478, 580), False, 'from django.conf.urls import patterns, url\n'), ((633, 652), 'django_comments.feeds.LatestCommentFeed', 'LatestCommentFeed', ([], {}), '()\n', (650, 652), False, 'from django_comments.feeds import LatestCommentFeed\n')] |
from __future__ import division
from cctbx.array_family import flex
from cctbx import xray
from cctbx import crystal
from cctbx import maptbx
from cctbx.maptbx import minimization
from libtbx.test_utils import approx_equal
import random
from cctbx.development import random_structure
from cctbx import sgtbx
if (1):
random.seed(0)
flex.set_random_seed(0)
def get_xrs():
crystal_symmetry = crystal.symmetry(
unit_cell=(10,10,10,90,90,90),
space_group_symbol="P 1")
return xray.structure(
crystal_symmetry=crystal_symmetry,
scatterers=flex.xray_scatterer([
xray.scatterer(label="C", site=(0,0,0))]))
def get_map(xrs, d_min=1.):
f_calc = xrs.structure_factors(d_min=d_min).f_calc()
fft_map = f_calc.fft_map()
fft_map.apply_sigma_scaling()
return fft_map.real_map_unpadded(), f_calc
def exercise_00():
"""
Exercise maptbx.target_and_gradients_diffmap .
"""
xrs = get_xrs()
map_data, f_calc = get_map(xrs=xrs)
tg = maptbx.target_and_gradients_diffmap(
unit_cell = xrs.unit_cell(),
map_target = map_data,
map_current = map_data,
step = 0.3,
sites_frac = xrs.sites_frac())
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
assert approx_equal(tg.target(), 0)
assert approx_equal(list(tg.gradients()), [[0,0,0]])
xrs = xrs.translate(x=0.3, y=-0.5, z=0.7)
assert approx_equal(xrs.sites_cart(), [[0.3,-0.5,0.7]])
map_current, f_calc = get_map(xrs=xrs)
tg = maptbx.target_and_gradients_diffmap(
unit_cell = xrs.unit_cell(),
map_target = map_data,
map_current = map_current,
step = 0.3,
sites_frac = xrs.sites_frac())
assert tg.target() > 0
for g in tg.gradients():
for g_ in g:
assert abs(g_)>0.
def exercise_01(d_min=1.0):
"""
Exercise maptbx.target_and_gradients_diffmap in action: minimization.
"""
xrs = get_xrs()
map_target, f_calc = get_map(xrs=xrs)
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
for sx in [-1,0,1]:
for sy in [-1,0,1]:
for sz in [-1,0,1]:
xrs_cp = xrs.deep_copy_scatterers()
xrs_cp = xrs_cp.translate(x=0.3*sx, y=0.5*sy, z=0.7*sz)
assert approx_equal(xrs_cp.sites_cart(), [[0.3*sx,0.5*sy,0.7*sz]],1.e-6)
crystal_gridding = maptbx.crystal_gridding(
unit_cell = xrs_cp.unit_cell(),
space_group_info = xrs_cp.space_group_info(),
pre_determined_n_real = map_target.accessor().all())
o = minimization.run(
xray_structure = xrs_cp,
miller_array = f_calc,
crystal_gridding = crystal_gridding,
map_target = map_target,
step = d_min/4,
target_type = "diffmap")
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
def exercise_02():
"""
Exercise maptbx.target_and_gradients_diffmap in action: minimization
(bigger model).
"""
def compute_map(xray_structure, d_min=1.5, resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 50)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.8)
start_error = flex.mean(xrs.distances(other = xrs_sh))
assert start_error>0.7
map_current, miller_array, crystal_gridding = compute_map(
xray_structure = xrs_sh)
for step in [miller_array.d_min()/4]*5:
minimized = minimization.run(
xray_structure = xrs_sh,
miller_array = miller_array,
crystal_gridding = crystal_gridding,
map_target = map_target,
max_iterations = 500,
min_iterations = 25,
step = step,
geometry_restraints_manager = None,
target_type = "diffmap")
xrs_sh = minimized.xray_structure
map_current = minimized.map_current
final_error = flex.mean(xrs.distances(other = minimized.xray_structure))
assert approx_equal(start_error, 0.8, 1.e-3)
assert final_error < 1.e-4
def exercise_03():
"""
Exercise maptbx.target_and_gradients_simple.
"""
def compute_map(xray_structure, d_min=1.5, resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 50)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.8)
#
t1 = maptbx.real_space_target_simple(
unit_cell = xrs.unit_cell(),
density_map = map_target,
sites_cart = xrs_sh.sites_cart(),
selection = flex.bool(xrs_sh.scatterers().size(), True))
g1 = maptbx.real_space_gradients_simple(
unit_cell = xrs.unit_cell(),
density_map = map_target,
sites_cart = xrs_sh.sites_cart(),
delta = 0.25,
selection = flex.bool(xrs_sh.scatterers().size(), True))
o = maptbx.target_and_gradients_simple(
unit_cell = xrs.unit_cell(),
map_target = map_target,
sites_cart = xrs_sh.sites_cart(),
delta = 0.25,
selection = flex.bool(xrs_sh.scatterers().size(), True))
assert approx_equal(t1, o.target())
for gi,gj in zip(g1, o.gradients()):
assert approx_equal(gi, gj)
def exercise_04():
"""
Exercise maptbx.target_and_gradients_simple in action: minimization
(bigger model).
"""
def compute_map(xray_structure, d_min=1., resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 150)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.3)
start_error = flex.mean(xrs.distances(other = xrs_sh))
assert start_error > 0.29
map_current, miller_array, crystal_gridding = compute_map(
xray_structure = xrs_sh)
xrs_sh_ = xrs_sh.deep_copy_scatterers()
minimized = minimization.run(
xray_structure = xrs_sh_,
miller_array = miller_array,
crystal_gridding = crystal_gridding,
map_target = map_target,
max_iterations = 500,
min_iterations = 25,
step = 0.5,
geometry_restraints_manager = None,
target_type = "simple")
xrs_sh_ = xrs_sh_.replace_sites_cart(minimized.sites_cart)
final_error = flex.mean(xrs.distances(other = xrs_sh_))
assert final_error < 0.015
if (__name__ == "__main__"):
exercise_00()
exercise_01()
exercise_02()
exercise_03()
exercise_04()
| [
"cctbx.sgtbx.space_group_info",
"random.seed",
"libtbx.test_utils.approx_equal",
"cctbx.array_family.flex.set_random_seed",
"cctbx.xray.scatterer",
"cctbx.crystal.symmetry",
"cctbx.maptbx.minimization.run"
]
| [((319, 333), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (330, 333), False, 'import random\n'), ((336, 359), 'cctbx.array_family.flex.set_random_seed', 'flex.set_random_seed', (['(0)'], {}), '(0)\n', (356, 359), False, 'from cctbx.array_family import flex\n'), ((397, 475), 'cctbx.crystal.symmetry', 'crystal.symmetry', ([], {'unit_cell': '(10, 10, 10, 90, 90, 90)', 'space_group_symbol': '"""P 1"""'}), "(unit_cell=(10, 10, 10, 90, 90, 90), space_group_symbol='P 1')\n", (413, 475), False, 'from cctbx import crystal\n'), ((4336, 4373), 'libtbx.test_utils.approx_equal', 'approx_equal', (['start_error', '(0.8)', '(0.001)'], {}), '(start_error, 0.8, 0.001)\n', (4348, 4373), False, 'from libtbx.test_utils import approx_equal\n'), ((6872, 7114), 'cctbx.maptbx.minimization.run', 'minimization.run', ([], {'xray_structure': 'xrs_sh_', 'miller_array': 'miller_array', 'crystal_gridding': 'crystal_gridding', 'map_target': 'map_target', 'max_iterations': '(500)', 'min_iterations': '(25)', 'step': '(0.5)', 'geometry_restraints_manager': 'None', 'target_type': '"""simple"""'}), "(xray_structure=xrs_sh_, miller_array=miller_array,\n crystal_gridding=crystal_gridding, map_target=map_target,\n max_iterations=500, min_iterations=25, step=0.5,\n geometry_restraints_manager=None, target_type='simple')\n", (6888, 7114), False, 'from cctbx.maptbx import minimization\n'), ((3746, 3989), 'cctbx.maptbx.minimization.run', 'minimization.run', ([], {'xray_structure': 'xrs_sh', 'miller_array': 'miller_array', 'crystal_gridding': 'crystal_gridding', 'map_target': 'map_target', 'max_iterations': '(500)', 'min_iterations': '(25)', 'step': 'step', 'geometry_restraints_manager': 'None', 'target_type': '"""diffmap"""'}), "(xray_structure=xrs_sh, miller_array=miller_array,\n crystal_gridding=crystal_gridding, map_target=map_target,\n max_iterations=500, min_iterations=25, step=step,\n geometry_restraints_manager=None, target_type='diffmap')\n", (3762, 3989), False, 'from cctbx.maptbx import minimization\n'), ((5874, 5894), 'libtbx.test_utils.approx_equal', 'approx_equal', (['gi', 'gj'], {}), '(gi, gj)\n', (5886, 5894), False, 'from libtbx.test_utils import approx_equal\n'), ((3259, 3292), 'cctbx.sgtbx.space_group_info', 'sgtbx.space_group_info', (['"""P212121"""'], {}), "('P212121')\n", (3281, 3292), False, 'from cctbx import sgtbx\n'), ((4851, 4884), 'cctbx.sgtbx.space_group_info', 'sgtbx.space_group_info', (['"""P212121"""'], {}), "('P212121')\n", (4873, 4884), False, 'from cctbx import sgtbx\n'), ((6383, 6416), 'cctbx.sgtbx.space_group_info', 'sgtbx.space_group_info', (['"""P212121"""'], {}), "('P212121')\n", (6405, 6416), False, 'from cctbx import sgtbx\n'), ((2453, 2619), 'cctbx.maptbx.minimization.run', 'minimization.run', ([], {'xray_structure': 'xrs_cp', 'miller_array': 'f_calc', 'crystal_gridding': 'crystal_gridding', 'map_target': 'map_target', 'step': '(d_min / 4)', 'target_type': '"""diffmap"""'}), "(xray_structure=xrs_cp, miller_array=f_calc,\n crystal_gridding=crystal_gridding, map_target=map_target, step=d_min / \n 4, target_type='diffmap')\n", (2469, 2619), False, 'from cctbx.maptbx import minimization\n'), ((587, 628), 'cctbx.xray.scatterer', 'xray.scatterer', ([], {'label': '"""C"""', 'site': '(0, 0, 0)'}), "(label='C', site=(0, 0, 0))\n", (601, 628), False, 'from cctbx import xray\n')] |
from pytest import raises
from datek_app_utils.env_config.base import BaseConfig
from datek_app_utils.env_config.errors import InstantiationForbiddenError
class SomeOtherMixinWhichDoesntRelateToEnvConfig:
color = "red"
class TestConfig:
def test_iter(self, monkeypatch, key_volume, base_config_class):
volume = 5
monkeypatch.setenv(key_volume, str(volume))
class Config(SomeOtherMixinWhichDoesntRelateToEnvConfig, base_config_class):
TYPE: str
items = [item for item in Config]
assert len(items) == 5
assert Config.color == "red"
assert items[0].name == "TYPE"
assert items[0].value is None
assert items[0].type == str
assert items[1].name == "FIELD_WITH_DEFAULT_VALUE"
assert items[1].value == "C"
assert items[1].type == str
assert items[2].name == "NON_MANDATORY_FIELD"
assert items[2].value is None
assert items[2].type == str
assert items[3].name == "TYPED_NON_MANDATORY_FIELD"
assert items[3].value is None
assert items[3].type == str
assert items[4].name == "VOLUME"
assert items[4].value == volume
assert items[4].type == int
def test_get(self, monkeypatch, key_volume, base_config_class):
volume = 10
monkeypatch.setenv(key_volume, str(volume))
assert getattr(base_config_class, "VOLUME") == volume
def test_constructor_is_forbidden(self):
class Config(BaseConfig):
pass
with raises(InstantiationForbiddenError):
Config()
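# Added note (not part of the original test): `key_volume` and `base_config_class` are pytest
# fixtures expected from a conftest.py that is not shown here.  A rough sketch inferred only
# from the assertions above (the exact declaration style this library expects is an assumption):
#
#   @pytest.fixture
#   def key_volume():
#       return "VOLUME"
#
#   @pytest.fixture
#   def base_config_class():
#       class SomeConfig(BaseConfig):
#           FIELD_WITH_DEFAULT_VALUE: str = "C"
#           NON_MANDATORY_FIELD = None
#           TYPED_NON_MANDATORY_FIELD: str = None
#           VOLUME: int
#       return SomeConfig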
| [
"pytest.raises"
]
| [((1550, 1585), 'pytest.raises', 'raises', (['InstantiationForbiddenError'], {}), '(InstantiationForbiddenError)\n', (1556, 1585), False, 'from pytest import raises\n')] |
# -*- coding: utf-8 -*-
# Name: comprehend
# Version: 0.1a2
# Owner: <NAME>
# Maintainer(s):
import boto3
def get_sentiment(text, language_code='en'):
"""Get sentiment.
Inspects text and returns an inference of the prevailing sentiment
(positive, neutral, mixed, or negative).
Args:
        text: UTF-8 text string. Each string must contain fewer than
5,000 bytes of UTF-8 encoded characters (required | type: str).
language_code: language of text (not required | type: str |
default: 'en').
Returns:
sentiment: sentiment: positive, neutral, mixed, or negative
(type: str).
"""
def prepare_text(text):
while len(bytes(text, 'utf-8')) > 4999:
text = text[:-1]
return text
comprehend = boto3.client('comprehend')
text = prepare_text(text)
try:
        r = comprehend.detect_sentiment(Text=text, LanguageCode=language_code)
except Exception as e:
raise e
sentiment = r['Sentiment'].lower()
return sentiment
# Example. Get sentiment of text below:
# "I ordered a small and expected it to fit just right but it was a little bit
# more like a medium-large. It was great quality. It's a lighter brown than
# pictured but fairly close. Would be ten times better if it was lined with
# cotton or wool on the inside."
# text = "I ordered a small and expected it to fit just right but it was a \
# little bit more like a medium-large. It was great quality. It's a \
# lighter brown than pictured but fairly close. Would be ten times \
# better if it was lined with cotton or wool on the inside."
# get_sentiment(text)
| [
"boto3.client"
]
| [((825, 851), 'boto3.client', 'boto3.client', (['"""comprehend"""'], {}), "('comprehend')\n", (837, 851), False, 'import boto3\n')] |
from sys import argv
from getopt import getopt
from os import R_OK, access
from string import Template
DEFAULT_DATASET_FILE_PATH = "dataset/data.csv"
DEFAULT_DATASET_COLUMNS = ['surface (m2)', 'height (m)', 'latitude', 'housing_type', 'longitude', 'country_code',
'city']
DEFAULT_VISU = ["scatter_plot", "histogram"]
DEFAULT_RANGE = [0, 1000]
def arguments():
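    """Parse command-line options and validate the dataset file, columns, visualizations and range."""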
    options, *_ = getopt(argv[1:], 'd:c:v:r:', ['dataset-file=', 'columns=', 'visus=', 'range='])
dataset_file = DEFAULT_DATASET_FILE_PATH
dataset_columns = DEFAULT_DATASET_COLUMNS
dataset_visus = DEFAULT_VISU
dataset_range = DEFAULT_RANGE
for opt, arg in options:
if opt in ('-d', '--dataset-file'):
dataset_file = arg
elif opt in ('-c', '--columns'):
dataset_columns = arg.split(',')
elif opt in ('-v', '--visus'):
dataset_visus = arg.split(',')
elif opt in ('-r', '--range'):
dataset_range = arg.split(',')
dataset_range = list(map(lambda x: int(x), dataset_range))
    if len(dataset_range) == 1:
dataset_range.append(DEFAULT_RANGE[1])
if not access(dataset_file, R_OK):
raise RuntimeError(Template("the file $file does not exists or is not readable.").substitute(file=dataset_file))
for column in dataset_columns:
if column not in DEFAULT_DATASET_COLUMNS:
raise RuntimeError(Template("Invalid column $column must be one of $columns.").
substitute(column=column, columns=','.join(DEFAULT_DATASET_COLUMNS)))
for visu in dataset_visus:
if visu not in DEFAULT_VISU:
raise RuntimeError(Template("Invalid visu $column must be one of $columns.").
substitute(column=visu, columns=','.join(DEFAULT_VISU)))
for range_num in dataset_range:
if range_num not in range(0, 1001):
raise RuntimeError(Template("Invalid range $column must be between 0 and 999.").
substitute(column=range_num))
return dataset_file, dataset_columns, dataset_visus, dataset_range
| [
"getopt.getopt",
"os.access",
"string.Template"
]
| [((407, 480), 'getopt.getopt', 'getopt', (['argv[1:]', '"""dc"""', "['dataset-file=', 'columns=', 'visus=', 'range=']"], {}), "(argv[1:], 'dc', ['dataset-file=', 'columns=', 'visus=', 'range='])\n", (413, 480), False, 'from getopt import getopt\n'), ((1149, 1175), 'os.access', 'access', (['dataset_file', 'R_OK'], {}), '(dataset_file, R_OK)\n', (1155, 1175), False, 'from os import R_OK, access\n'), ((1204, 1266), 'string.Template', 'Template', (['"""the file $file does not exists or is not readable."""'], {}), "('the file $file does not exists or is not readable.')\n", (1212, 1266), False, 'from string import Template\n'), ((1415, 1474), 'string.Template', 'Template', (['"""Invalid column $column must be one of $columns."""'], {}), "('Invalid column $column must be one of $columns.')\n", (1423, 1474), False, 'from string import Template\n'), ((1677, 1734), 'string.Template', 'Template', (['"""Invalid visu $column must be one of $columns."""'], {}), "('Invalid visu $column must be one of $columns.')\n", (1685, 1734), False, 'from string import Template\n'), ((1936, 1996), 'string.Template', 'Template', (['"""Invalid range $column must be between 0 and 999."""'], {}), "('Invalid range $column must be between 0 and 999.')\n", (1944, 1996), False, 'from string import Template\n')] |
# Must run example4.py first
# Read an Excel sheet and save running config of devices using pandas
import pandas as pd
from netmiko import ConnectHandler
# Read Excel file of .xlsx format
data = pd.read_excel(io="Example4-Device-Details.xlsx", sheet_name=0)
# Convert data to data frame
df = pd.DataFrame(data=data)
# Convert data frame from MGMT IP Address to a list
device_ip_list = df.iloc[:, 1].tolist()
# Define devices variable
devices = []
for ip in device_ip_list:
devices.append(
{
"device_type": "cisco_ios", # must be the same for all devices
"ip": ip,
"username": "developer", # must be the same for all devices
"password": "<PASSWORD>", # must be the same for all devices
"port": 22, # must be the same for all devices
# If port for all devices is not 22 you will get an error
"fast_cli": False,
}
)
for device in devices:
# Create a connection instance
with ConnectHandler(**device) as net_connect:
# hostname of the current device
hostname = net_connect.send_command(
command_string="show version", use_textfsm=True
)[0]["hostname"]
run_cfg: str = net_connect.send_command(command_string="show running-config")
# Create .txt for each running configuration of each device
with open(file=f"{hostname}_ex7-run-cfg.txt", mode="w") as outfile:
outfile.write(run_cfg.lstrip())
print("Done")
| [
"pandas.DataFrame",
"netmiko.ConnectHandler",
"pandas.read_excel"
]
| [((198, 260), 'pandas.read_excel', 'pd.read_excel', ([], {'io': '"""Example4-Device-Details.xlsx"""', 'sheet_name': '(0)'}), "(io='Example4-Device-Details.xlsx', sheet_name=0)\n", (211, 260), True, 'import pandas as pd\n'), ((296, 319), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data'}), '(data=data)\n', (308, 319), True, 'import pandas as pd\n'), ((1000, 1024), 'netmiko.ConnectHandler', 'ConnectHandler', ([], {}), '(**device)\n', (1014, 1024), False, 'from netmiko import ConnectHandler\n')] |
"""Utilities for interacting with GitHub"""
import os
import json
import webbrowser
import stat
import sys
from git import Repo
from .context import Context
event_dict = {
"added_to_project": (
lambda event: "{} added the issue to a project.".format(event["actor"]["login"])
),
"assigned": (
lambda event: "{} assigned the issue to {}.".format(
event["actor"]["login"], event["assignee"]["login"]
)
),
"closed": (lambda event: "{} closed this issue.".format(event["actor"]["login"])),
"converted_note_to_issue": (
lambda event: "{} created this issue from a note.".format(
event["actor"]["login"]
)
),
"demilestoned": (lambda event: "The issue was removed from a milestone."),
"head_ref_deleted": (lambda event: "The pull request's branch was deleted."),
"head_ref_restored": (lambda event: "The pull request's branch was restored."),
"labelled": (
lambda event: "{} added {} label to the issue.".format(
event["actor"]["login"], event["label"]
)
),
"locked": (
lambda event: "The issue was locked by {}.".format(event["actor"]["login"])
),
"mentioned": (
lambda event: "{} was mentioned in the issue's body.".format(
event["actor"]["login"]
)
),
"marked_as_duplicate": (
lambda event: "The issue was marked duplicate by {}.".format(
event["actor"]["login"]
)
),
"merged": (
lambda event: "The issue was merged by {}.".format(event["actor"]["login"])
),
"milestoned": (lambda event: "The issue was added to a milestone."),
"moved_columns_in_project": (
lambda event: "The issue was moved between columns in a project board."
),
"referenced": (lambda event: "The issue was referenced from a commit message."),
"renamed": (lambda event: "The title of the issue was changed."),
"reopened": (
lambda event: "The issue was reopened by {}".format(event["actor"]["login"])
),
"review_dismissed": (
lambda event: "{} dismissed a review from the pull request.".format(
event["actor"]["login"]
)
),
"review_requested": (
lambda event: "{} requested review from the subject on this pull request.".format(
event["actor"]["login"]
)
),
"review_request_removed": (
lambda event: "{} removed the review request for the subject on this pull request.".format(
event["actor"]["login"]
)
),
"subscribed": (
lambda event: "{} subscribed to receive notifications for the issue.".format(
event["actor"]["login"]
)
),
"transferred": (lambda event: "The issue was transferred to another repository."),
"unassigned": (
lambda event: "{} was unassigned from the issue.".format(
event["actor"]["login"]
)
),
"unlabeled": (lambda event: "A label was removed from the issue."),
"unlocked": (
lambda event: "The issue was unlocked by {}".format(event["actor"]["login"])
),
"unmarked_as_duplicate": (lambda event: "The was unmarked as dublicate."),
"user_blocked": (lambda event: "A user was blocked from the organization."),
}
def authorize(ghub, reauthorize=False, fromenv=False):
"""Authorize a user for GHub
Keyword arguments:
ghub -- the ghub object that needs authorization
    reauthorize -- performs authorization again (default False)
    fromenv -- read the OAuth credentials from the GHUB_CRED environment variable (default False)
    """
if fromenv:
oauth_data = json.loads(os.environ["GHUB_CRED"])
ghub.oauth_data = oauth_data
ghub.github.token = oauth_data
return True
if not os.path.isfile(ghub.data_path / ghub.auth_filename) or reauthorize:
authorization_base_url = "https://github.com/login/oauth/authorize"
token_url = "https://github.com/login/oauth/access_token"
authorization_url, _ = ghub.github.authorization_url(authorization_base_url)
webbrowser.open(authorization_url)
print("Please visit this site and grant access: {}".format(authorization_url))
redirect_response = input(
"Please enter the URL you were redirected to after granting access: "
)
try:
response = ghub.github.fetch_token(
token_url,
client_secret=ghub.client_secret,
authorization_response=redirect_response,
)
except Exception as e:
print(e)
print(
"Network Error. Make sure you have a working internet connection and try again."
)
sys.exit(1)
if not os.path.isdir(ghub.data_path):
os.makedirs(ghub.data_path)
data_file = open(ghub.data_path / ghub.auth_filename, "w+")
json.dump(response, data_file)
data_file.close()
os.chmod(ghub.data_path / ghub.auth_filename, stat.S_IRUSR | stat.S_IWUSR)
ghub.oauth_data = response
return True
else:
data_file = open(ghub.data_path / ghub.auth_filename, "r")
oauth_data = json.loads(data_file.read())
data_file.close()
ghub.oauth_data = oauth_data
ghub.github.token = oauth_data
return True
def get_user(ghub, user):
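    """Fetch a GitHub user's profile and switch the current context to that user. Returns True on success."""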
url = ghub.api_url + ghub.endpoints["users"] + user
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "user"
ghub.context.location = user
ghub.context.cache = response.json()
return True
return False
def get_org(ghub, org):
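    """Fetch a GitHub organization's profile and switch the current context to it. Returns True on success."""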
url = ghub.api_url + ghub.endpoints["orgs"] + org
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "org"
ghub.context.location = org
ghub.context.cache = response.json()
return True
return False
def get_user_tabs(ghub, tab=""):
tabs = ["repos", "stars", "followers", "following", "notifications"]
if tab not in tabs:
print("{} is not a valid user tab".format(tab))
return
if ghub.context.context == "root":
if tab == "":
ghub.context.set_context_to_root()
elif tab == "repos":
response = ghub.github.get(ghub.api_url + ghub.endpoints["user"] + "/repos")
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + "repos"
ghub.context.context = "repos"
else:
print("Error getting data - " + response.status_code)
elif tab == "stars":
response = ghub.github.get(
ghub.api_url + ghub.endpoints["user"] + "/starred"
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + "stars"
ghub.context.context = "stars"
else:
print("Error getting data - " + response.status_code)
elif tab == "followers" or tab == "following":
response = ghub.github.get(
ghub.api_url + ghub.endpoints["user"] + "/" + tab
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
elif tab == "notifications":
response = ghub.github.get(ghub.api_url + ghub.endpoints["notifications"])
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
elif ghub.context.context == "user" or ghub.context.context == "org":
if tab == "":
ghub.context.set_context_to_root()
elif tab == "repos":
if ghub.context.context == "user":
url = (
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/repos"
)
else:
url = (
ghub.api_url
+ ghub.endpoints["orgs"]
+ ghub.context.location
+ "/repos"
)
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = (
ghub.context.prev_context.location + "/" + "repos"
)
ghub.context.context = "repos"
else:
print("Error getting data - " + response.status_code)
elif tab == "stars":
response = ghub.github.get(
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/starred"
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = (
ghub.context.prev_context.location + "/" + "star"
)
ghub.context.context = "stars"
else:
print("Error getting data - " + response.status_code)
elif tab == "followers" or tab == "following":
response = ghub.github.get(
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/"
+ tab
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.context.prev_context.location + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
else:
pass
def get_latest_commit(ghub, repo, branch="master"):
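    """Return the latest commit object on the given branch, or False if the request fails."""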
api_url = "https://api.github.com/repos/{}/branches/{}".format(repo, branch)
response = ghub.github.get(api_url)
if response.status_code == 200:
response = response.json()
return response["commit"]["commit"]
else:
return False
def get_tree(ghub, repo=None, branch="master", tree_url=None):
if tree_url == None:
latest_commit = get_latest_commit(ghub, repo, branch)
if latest_commit == False:
return False
response = ghub.github.get(latest_commit["tree"]["url"])
if response.status_code == 200:
response = response.json()
return response
return False
else:
response = ghub.github.get(tree_url)
if response.status_code == 200:
response = response.json()
return response
def get_blob(ghub, blob_url):
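    """Return the blob object at blob_url, or False if the request fails."""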
response = ghub.github.get(blob_url)
if response.status_code == 200:
return response.json()
return False
def clone_repo(ghub, dir, repo_name=None):
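    """Clone a repository (defaults to the repo in the current context) into the given directory."""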
print("Preparing to clone...")
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
if dir[0] == "~":
dir = os.path.expanduser("~") + dir[1:]
dir = dir + "/" + repo_name.split("/")[1]
try:
Repo.clone_from("https://github.com/" + repo_name, dir)
print("{} cloned to {}".format(repo_name, dir))
return True
except Exception as e:
print(e)
return False
def star_repo(ghub, repo_name=None):
print("Starring repo...")
if repo_name == None:
repo_name = ghub.context.location
star_url = ghub.api_url + ghub.endpoints["user"] + "/" + "starred/" + repo_name
response = ghub.github.get(star_url)
if response.status_code == 204:
print("Repo is already starred.")
elif response.status_code == 404:
resp = ghub.github.put(star_url)
if resp.status_code == 204:
print("{} starred".format(repo_name))
else:
print("Error starring repo")
def unstar_repo(ghub, repo_name=None):
print("Unstarring repo...")
if repo_name == None:
repo_name = ghub.context.location
star_url = ghub.api_url + ghub.endpoints["user"] + "/" + "starred/" + repo_name
response = ghub.github.get(star_url)
if response.status_code == 204:
resp = ghub.github.delete(star_url)
if resp.status_code == 204:
print("{} unstarred".format(repo_name))
else:
print("Error unstarring repo")
elif response.status_code == 404:
print("Repo is not starred.")
def watch_repo(ghub, repo_name=None):
print("Subscribing to repo...")
if repo_name == None:
repo_name = ghub.context.location
watch_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/subscription"
response = ghub.github.get(watch_url)
if response.status_code == 200:
print("You are already watching this repo.")
elif response.status_code == 404:
resp = ghub.github.put(watch_url)
if resp.status_code == 200:
print("Watching {}".format(repo_name))
else:
print("Error subscribing to repo")
def unwatch_repo(ghub, repo_name=None):
print("Unsubscribing repo...")
if repo_name == None:
repo_name = ghub.context.location
watch_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/subscription"
response = ghub.github.get(watch_url)
if response.status_code == 200:
resp = ghub.github.delete(watch_url)
if resp.status_code == 204:
print("{} unsubscribed".format(repo_name))
else:
print("Error unsubscribing to repo")
elif response.status_code == 404:
print("You are not watching this repo.")
def fork_repo(ghub, repo_name=None):
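    """Fork the given repository (defaults to the repo in the current context) to the authenticated user's account."""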
print("Forking Repo...")
if repo_name == None:
repo_name = ghub.context.location.split("/")
repo_name = "/".join(repo_name[:2])
true_repo_name = repo_name.split("/")[1]
forked_url = (
ghub.api_url
+ ghub.endpoints["repos"]
+ ghub.get_user_username()
+ "/"
+ true_repo_name
)
response = ghub.github.get(forked_url)
if response.status_code == 200:
print("Cannot fork. Repo Already Exists.")
return False
print("Repo is being forked. Please wait for it to complete.", end="")
response = ghub.github.post(
ghub.api_url + ghub.endpoints["repos"] + repo_name + "/forks"
)
if response.status_code == 202:
print(
"\nForking complete. Forked repo to {}".format(
ghub.get_user_username() + "/" + true_repo_name
)
)
return True
else:
print("Error while trying fork.")
return False
def get_prs(ghub, repo_name=None):
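    """Fetch the pull requests of a repository and switch the context to them. Returns True on success."""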
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
pr_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/pulls"
response = ghub.github.get(pr_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "pull_requests"
ghub.context.location = repo_name + "/pull_requests"
ghub.context.cache = response.json()
return True
return False
def get_pr(ghub, pr_no):
if not pr_no.isdigit():
print("Invalid PR number")
return False
repo_name = "/".join(ghub.context.location.split("/")[:2])
pr_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/pulls/" + pr_no
response = ghub.github.get(pr_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "pull_request"
ghub.context.location = repo_name + "/pull_requests/" + pr_no
ghub.context.cache = response.json()
return True
elif response.status_code == 404:
print("No PR found with PR number {}".format(pr_no))
return False
def get_pr_info(ghub, info_type="comments"):
info_url = ghub.context.cache["_links"][info_type]["href"]
response = ghub.github.get(info_url)
return response.json(), response.status_code
def get_issues(ghub, repo_name=None):
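    """Fetch the issues of a repository and switch the context to them. Returns True on success."""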
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
issue_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/issues"
response = ghub.github.get(issue_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "issues"
ghub.context.location = repo_name + "/issues"
ghub.context.cache = response.json()
return True
return False
def get_issue(ghub, issue_no):
if not issue_no.isdigit():
print("Invalid issue number")
return False
repo_name = "/".join(ghub.context.location.split("/")[:2])
issue_url = (
ghub.api_url + ghub.endpoints["repos"] + repo_name + "/issues/" + issue_no
)
response = ghub.github.get(issue_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "issue"
ghub.context.location = repo_name + "/issues/" + issue_no
ghub.context.cache = response.json()
return True
elif response.status_code == 404:
print("No issue found with issue number {}".format(issue_no))
return False
def get_issue_info(ghub, info_type="comments"):
info_url = ghub.context.cache["{}_url".format(info_type)]
response = ghub.github.get(info_url)
return response.json(), response.status_code
| [
"os.path.expanduser",
"json.loads",
"os.makedirs",
"git.Repo.clone_from",
"webbrowser.open",
"os.chmod",
"os.path.isfile",
"os.path.isdir",
"sys.exit",
"json.dump"
]
| [((3575, 3610), 'json.loads', 'json.loads', (["os.environ['GHUB_CRED']"], {}), "(os.environ['GHUB_CRED'])\n", (3585, 3610), False, 'import json\n'), ((4021, 4055), 'webbrowser.open', 'webbrowser.open', (['authorization_url'], {}), '(authorization_url)\n', (4036, 4055), False, 'import webbrowser\n'), ((4848, 4878), 'json.dump', 'json.dump', (['response', 'data_file'], {}), '(response, data_file)\n', (4857, 4878), False, 'import json\n'), ((4913, 4987), 'os.chmod', 'os.chmod', (['(ghub.data_path / ghub.auth_filename)', '(stat.S_IRUSR | stat.S_IWUSR)'], {}), '(ghub.data_path / ghub.auth_filename, stat.S_IRUSR | stat.S_IWUSR)\n', (4921, 4987), False, 'import os\n'), ((12153, 12208), 'git.Repo.clone_from', 'Repo.clone_from', (["('https://github.com/' + repo_name)", 'dir'], {}), "('https://github.com/' + repo_name, dir)\n", (12168, 12208), False, 'from git import Repo\n'), ((3718, 3769), 'os.path.isfile', 'os.path.isfile', (['(ghub.data_path / ghub.auth_filename)'], {}), '(ghub.data_path / ghub.auth_filename)\n', (3732, 3769), False, 'import os\n'), ((4701, 4730), 'os.path.isdir', 'os.path.isdir', (['ghub.data_path'], {}), '(ghub.data_path)\n', (4714, 4730), False, 'import os\n'), ((4744, 4771), 'os.makedirs', 'os.makedirs', (['ghub.data_path'], {}), '(ghub.data_path)\n', (4755, 4771), False, 'import os\n'), ((12056, 12079), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (12074, 12079), False, 'import os\n'), ((4674, 4685), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4682, 4685), False, 'import sys\n')] |
# Generated by Django 3.0.7 on 2020-09-18 05:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import multiselectfield.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Equipment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[(None, 'Please select'), ('tractor', 'Tractor'), ('implement', 'Implement'), ('other_equipment', 'Other Equipment')], max_length=100, verbose_name='What Equipment you want to Add?')),
],
),
migrations.CreateModel(
name='ImplementCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('image', models.ImageField(upload_to='implements_category')),
],
options={
'verbose_name_plural': 'Implement Categories',
},
),
migrations.CreateModel(
name='Phone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone', models.CharField(max_length=18)),
],
),
migrations.CreateModel(
name='TractorCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('image', models.ImageField(upload_to='tractor_category')),
],
options={
'verbose_name_plural': 'Tractor Categories',
},
),
migrations.CreateModel(
name='Tractor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('drive_type', models.CharField(choices=[(None, 'Please Select'), ('two wheel drive', 'Two wheel Drive'), ('four wheel drive', 'Four wheel Drive')], max_length=100, verbose_name='What Drive Type')),
('name', models.CharField(help_text='eg. <NAME> 6190R', max_length=200, verbose_name='Name/Models of Tractor')),
('mode_of_transmission', models.CharField(choices=[(None, 'Please Select'), ('gear', 'Gear'), ('manual', 'Manual'), ('hydrostatic', 'Hydrostatic'), ('turbochanged', 'Turbocharged')], max_length=100, verbose_name='Mode of Transmission')),
('engine_hp', models.PositiveIntegerField(verbose_name='Engine Horse Power (eg. 75hp)')),
('drawbar_hp', models.PositiveIntegerField(verbose_name='Drawbar Horse Power (eg. 65hp)')),
('pto_hp', models.PositiveIntegerField(verbose_name='PTO Horse Power (eg. 85hp)')),
('hydraulic_capacity', models.CharField(help_text='Use a SI units of gpm or psi', max_length=100, verbose_name='Hydaulic capacity (gallon per minutes(gpm) or psi-pound per square inchies)')),
('type_of_hitching', models.CharField(choices=[(None, 'Please Select'), ('two point hitches', 'Two-point hitches'), ('three point hitches', 'Three-point hitches')], max_length=100, verbose_name='What is Hitching type?')),
('cab', models.BooleanField(default=False, verbose_name='Does have a cab?')),
('rollover_protection', models.BooleanField(default=False, verbose_name='Does have the rollover protection?')),
('fuel_consumption', models.PositiveIntegerField(verbose_name='Fuel consumption (gallon per hour on operation)')),
('attachment_mode', models.CharField(choices=[(None, 'Please select'), ('frontend loader', 'frontend loader'), ('backhoe', 'Backhoe'), ('both', 'Both')], max_length=100, verbose_name='What mode of attachment?')),
('operator', models.BooleanField(default=False, verbose_name='Do you have an operator(s)?')),
('file', models.FileField(help_text='Upload quality picture of real tractor you have, only 5 picture.', upload_to='tractors_photos/', verbose_name='Upload the Tractor pictures')),
('other_informations', models.TextField(blank=True, verbose_name='Describe your Tractor')),
('price_hour', models.PositiveIntegerField(verbose_name='Specify the price per Hour in TShs.')),
('price_hectare', models.PositiveIntegerField(verbose_name='Specify the price per Hectare')),
('farm_services', multiselectfield.db.fields.MultiSelectField(choices=[('soil cultivations', 'Soil cultivations'), ('planting', 'Planting'), ('haversting/post-haversting', 'Haversting/Post-Haversting'), ('fertilizing & pest-control', 'Fertilizing & Pest-control'), ('drainage & irrigation', 'Drainage & Irrigation'), ('loading', 'Loading'), ('hay making', 'Hay making'), ('miscellaneous', 'Miscellaneous')], max_length=135, verbose_name='What are farming service(s) do you offer?')),
('agree_terms', models.BooleanField(default=False, verbose_name='Do your Accept our Terms and Conditions?')),
('status', models.CharField(choices=[('pending', 'Pending'), ('approved', 'Approved')], default='pending', max_length=100)),
('tractor_type', models.ForeignKey(on_delete=models.SET('others'), to='equipments.TractorCategory', verbose_name='What type of Tractor?')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ImplementSubCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='equipments.ImplementCategory')),
],
options={
'verbose_name_plural': 'Implement Subcategories',
},
),
migrations.CreateModel(
name='Implement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=100, verbose_name='Name/Models of Implement')),
('width', models.PositiveIntegerField(help_text='SI UNITS in metre', verbose_name='Width of the Implement')),
('weight', models.PositiveIntegerField(help_text='SI UNITS in KG', verbose_name='Weight of the Implement')),
('operation_mode', models.CharField(choices=[(None, 'Please Select'), ('tractor drive', 'Tractor drive'), ('self-propelled', 'Self-propelled')], max_length=100, verbose_name='What is mode of operation?')),
('pto', models.PositiveIntegerField(verbose_name='What is Horse Power required for Operation?')),
('hydraulic_capacity', models.CharField(max_length=100, verbose_name='What is Hydaulic capacity required to lift?')),
('operator', models.BooleanField(verbose_name='Do you have an operator(s)?')),
('file', models.FileField(help_text='Upload quality picture of real implement you have, only 5 pictures.', upload_to='implements_photos/', verbose_name='Upload the Implement pictures')),
('other_informations', models.TextField(blank=True, verbose_name='Describe your Implement')),
('price_hour', models.PositiveIntegerField(verbose_name='Specify the price per Hour')),
('price_hectare', models.PositiveIntegerField(verbose_name='Specify the price per Hectare')),
('agree_terms', models.BooleanField(default=False, verbose_name='Do your Accept our Terms and Conditions?')),
('status', models.CharField(choices=[('pending', 'Pending'), ('approved', 'Approved')], default='pending', max_length=100)),
('category', models.ForeignKey(on_delete=models.SET('others'), to='equipments.ImplementCategory', verbose_name='What category of your Implement')),
('subcategory', models.ForeignKey(on_delete=models.SET('others'), to='equipments.ImplementSubCategory', verbose_name='What is subcategory of your Implement')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"django.db.models.SET",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.FileField",
"django.db.models.DateTimeField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.models.ImageField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
]
| [((281, 338), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (312, 338), False, 'from django.db import migrations, models\n'), ((472, 565), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (488, 565), False, 'from django.db import migrations, models\n'), ((589, 804), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(None, 'Please select'), ('tractor', 'Tractor'), ('implement', 'Implement'\n ), ('other_equipment', 'Other Equipment')]", 'max_length': '(100)', 'verbose_name': '"""What Equipment you want to Add?"""'}), "(choices=[(None, 'Please select'), ('tractor', 'Tractor'),\n ('implement', 'Implement'), ('other_equipment', 'Other Equipment')],\n max_length=100, verbose_name='What Equipment you want to Add?')\n", (605, 804), False, 'from django.db import migrations, models\n'), ((939, 1032), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (955, 1032), False, 'from django.db import migrations, models\n'), ((1056, 1088), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1072, 1088), False, 'from django.db import migrations, models\n'), ((1117, 1167), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""implements_category"""'}), "(upload_to='implements_category')\n", (1134, 1167), False, 'from django.db import migrations, models\n'), ((1398, 1491), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1414, 1491), False, 'from django.db import migrations, models\n'), ((1516, 1547), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(18)'}), '(max_length=18)\n', (1532, 1547), False, 'from django.db import migrations, models\n'), ((1688, 1781), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1704, 1781), False, 'from django.db import migrations, models\n'), ((1805, 1837), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1821, 1837), False, 'from django.db import migrations, models\n'), ((1866, 1913), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""tractor_category"""'}), "(upload_to='tractor_category')\n", (1883, 1913), False, 'from django.db import migrations, models\n'), ((2144, 2237), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2160, 2237), False, 'from django.db import migrations, models\n'), ((2264, 2303), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2284, 2303), False, 
'from django.db import migrations, models\n'), ((2335, 2370), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2355, 2370), False, 'from django.db import migrations, models\n'), ((2404, 2593), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(None, 'Please Select'), ('two wheel drive', 'Two wheel Drive'), (\n 'four wheel drive', 'Four wheel Drive')]", 'max_length': '(100)', 'verbose_name': '"""What Drive Type"""'}), "(choices=[(None, 'Please Select'), ('two wheel drive',\n 'Two wheel Drive'), ('four wheel drive', 'Four wheel Drive')],\n max_length=100, verbose_name='What Drive Type')\n", (2420, 2593), False, 'from django.db import migrations, models\n'), ((2613, 2719), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""eg. <NAME> 6190R"""', 'max_length': '(200)', 'verbose_name': '"""Name/Models of Tractor"""'}), "(help_text='eg. <NAME> 6190R', max_length=200, verbose_name\n ='Name/Models of Tractor')\n", (2629, 2719), False, 'from django.db import migrations, models\n'), ((2758, 2977), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(None, 'Please Select'), ('gear', 'Gear'), ('manual', 'Manual'), (\n 'hydrostatic', 'Hydrostatic'), ('turbochanged', 'Turbocharged')]", 'max_length': '(100)', 'verbose_name': '"""Mode of Transmission"""'}), "(choices=[(None, 'Please Select'), ('gear', 'Gear'), (\n 'manual', 'Manual'), ('hydrostatic', 'Hydrostatic'), ('turbochanged',\n 'Turbocharged')], max_length=100, verbose_name='Mode of Transmission')\n", (2774, 2977), False, 'from django.db import migrations, models\n'), ((3001, 3074), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'verbose_name': '"""Engine Horse Power (eg. 75hp)"""'}), "(verbose_name='Engine Horse Power (eg. 75hp)')\n", (3028, 3074), False, 'from django.db import migrations, models\n'), ((3108, 3182), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'verbose_name': '"""Drawbar Horse Power (eg. 65hp)"""'}), "(verbose_name='Drawbar Horse Power (eg. 65hp)')\n", (3135, 3182), False, 'from django.db import migrations, models\n'), ((3212, 3282), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'verbose_name': '"""PTO Horse Power (eg. 85hp)"""'}), "(verbose_name='PTO Horse Power (eg. 
85hp)')\n", (3239, 3282), False, 'from django.db import migrations, models\n'), ((3324, 3504), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Use a SI units of gpm or psi"""', 'max_length': '(100)', 'verbose_name': '"""Hydaulic capacity (gallon per minutes(gpm) or psi-pound per square inchies)"""'}), "(help_text='Use a SI units of gpm or psi', max_length=100,\n verbose_name=\n 'Hydaulic capacity (gallon per minutes(gpm) or psi-pound per square inchies)'\n )\n", (3340, 3504), False, 'from django.db import migrations, models\n'), ((3530, 3736), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(None, 'Please Select'), ('two point hitches', 'Two-point hitches'), (\n 'three point hitches', 'Three-point hitches')]", 'max_length': '(100)', 'verbose_name': '"""What is Hitching type?"""'}), "(choices=[(None, 'Please Select'), ('two point hitches',\n 'Two-point hitches'), ('three point hitches', 'Three-point hitches')],\n max_length=100, verbose_name='What is Hitching type?')\n", (3546, 3736), False, 'from django.db import migrations, models\n'), ((3755, 3822), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Does have a cab?"""'}), "(default=False, verbose_name='Does have a cab?')\n", (3774, 3822), False, 'from django.db import migrations, models\n'), ((3865, 3955), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Does have the rollover protection?"""'}), "(default=False, verbose_name=\n 'Does have the rollover protection?')\n", (3884, 3955), False, 'from django.db import migrations, models\n'), ((3990, 4086), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'verbose_name': '"""Fuel consumption (gallon per hour on operation)"""'}), "(verbose_name=\n 'Fuel consumption (gallon per hour on operation)')\n", (4017, 4086), False, 'from django.db import migrations, models\n'), ((4120, 4318), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(None, 'Please select'), ('frontend loader', 'frontend loader'), (\n 'backhoe', 'Backhoe'), ('both', 'Both')]", 'max_length': '(100)', 'verbose_name': '"""What mode of attachment?"""'}), "(choices=[(None, 'Please select'), ('frontend loader',\n 'frontend loader'), ('backhoe', 'Backhoe'), ('both', 'Both')],\n max_length=100, verbose_name='What mode of attachment?')\n", (4136, 4318), False, 'from django.db import migrations, models\n'), ((4342, 4420), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Do you have an operator(s)?"""'}), "(default=False, verbose_name='Do you have an operator(s)?')\n", (4361, 4420), False, 'from django.db import migrations, models\n'), ((4448, 4625), 'django.db.models.FileField', 'models.FileField', ([], {'help_text': '"""Upload quality picture of real tractor you have, only 5 picture."""', 'upload_to': '"""tractors_photos/"""', 'verbose_name': '"""Upload the Tractor pictures"""'}), "(help_text=\n 'Upload quality picture of real tractor you have, only 5 picture.',\n upload_to='tractors_photos/', verbose_name='Upload the Tractor pictures')\n", (4464, 4625), False, 'from django.db import migrations, models\n'), ((4658, 4724), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""Describe your Tractor"""'}), "(blank=True, verbose_name='Describe your Tractor')\n", (4674, 4724), False, 'from django.db import migrations, models\n'), ((4758, 4837), 
'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'verbose_name': '"""Specify the price per Hour in TShs."""'}), "(verbose_name='Specify the price per Hour in TShs.')\n", (4785, 4837), False, 'from django.db import migrations, models\n'), ((4874, 4947), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'verbose_name': '"""Specify the price per Hectare"""'}), "(verbose_name='Specify the price per Hectare')\n", (4901, 4947), False, 'from django.db import migrations, models\n'), ((5482, 5578), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Do your Accept our Terms and Conditions?"""'}), "(default=False, verbose_name=\n 'Do your Accept our Terms and Conditions?')\n", (5501, 5578), False, 'from django.db import migrations, models\n'), ((5603, 5718), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('pending', 'Pending'), ('approved', 'Approved')]", 'default': '"""pending"""', 'max_length': '(100)'}), "(choices=[('pending', 'Pending'), ('approved', 'Approved')],\n default='pending', max_length=100)\n", (5619, 5718), False, 'from django.db import migrations, models\n'), ((5898, 5994), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (5915, 5994), False, 'from django.db import migrations, models\n'), ((6135, 6228), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (6151, 6228), False, 'from django.db import migrations, models\n'), ((6252, 6284), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (6268, 6284), False, 'from django.db import migrations, models\n'), ((6316, 6418), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""equipments.ImplementCategory"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'equipments.ImplementCategory')\n", (6333, 6418), False, 'from django.db import migrations, models\n'), ((6651, 6744), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (6667, 6744), False, 'from django.db import migrations, models\n'), ((6771, 6810), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (6791, 6810), False, 'from django.db import migrations, models\n'), ((6842, 6877), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (6862, 6877), False, 'from django.db import migrations, models\n'), ((6905, 6978), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Name/Models of Implement"""'}), "(max_length=100, verbose_name='Name/Models of Implement')\n", (6921, 6978), False, 'from django.db import migrations, models\n'), ((7007, 7109), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'help_text': '"""SI UNITS in metre"""', 'verbose_name': '"""Width of the Implement"""'}), "(help_text='SI 
UNITS in metre', verbose_name=\n 'Width of the Implement')\n", (7034, 7109), False, 'from django.db import migrations, models\n'), ((7134, 7234), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'help_text': '"""SI UNITS in KG"""', 'verbose_name': '"""Weight of the Implement"""'}), "(help_text='SI UNITS in KG', verbose_name=\n 'Weight of the Implement')\n", (7161, 7234), False, 'from django.db import migrations, models\n'), ((7267, 7459), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(None, 'Please Select'), ('tractor drive', 'Tractor drive'), (\n 'self-propelled', 'Self-propelled')]", 'max_length': '(100)', 'verbose_name': '"""What is mode of operation?"""'}), "(choices=[(None, 'Please Select'), ('tractor drive',\n 'Tractor drive'), ('self-propelled', 'Self-propelled')], max_length=100,\n verbose_name='What is mode of operation?')\n", (7283, 7459), False, 'from django.db import migrations, models\n'), ((7478, 7570), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'verbose_name': '"""What is Horse Power required for Operation?"""'}), "(verbose_name=\n 'What is Horse Power required for Operation?')\n", (7505, 7570), False, 'from django.db import migrations, models\n'), ((7607, 7704), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""What is Hydaulic capacity required to lift?"""'}), "(max_length=100, verbose_name=\n 'What is Hydaulic capacity required to lift?')\n", (7623, 7704), False, 'from django.db import migrations, models\n'), ((7731, 7794), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'verbose_name': '"""Do you have an operator(s)?"""'}), "(verbose_name='Do you have an operator(s)?')\n", (7750, 7794), False, 'from django.db import migrations, models\n'), ((7822, 8011), 'django.db.models.FileField', 'models.FileField', ([], {'help_text': '"""Upload quality picture of real implement you have, only 5 pictures."""', 'upload_to': '"""implements_photos/"""', 'verbose_name': '"""Upload the Implement pictures"""'}), "(help_text=\n 'Upload quality picture of real implement you have, only 5 pictures.',\n upload_to='implements_photos/', verbose_name=\n 'Upload the Implement pictures')\n", (7838, 8011), False, 'from django.db import migrations, models\n'), ((8039, 8107), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""Describe your Implement"""'}), "(blank=True, verbose_name='Describe your Implement')\n", (8055, 8107), False, 'from django.db import migrations, models\n'), ((8141, 8211), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'verbose_name': '"""Specify the price per Hour"""'}), "(verbose_name='Specify the price per Hour')\n", (8168, 8211), False, 'from django.db import migrations, models\n'), ((8248, 8321), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'verbose_name': '"""Specify the price per Hectare"""'}), "(verbose_name='Specify the price per Hectare')\n", (8275, 8321), False, 'from django.db import migrations, models\n'), ((8356, 8452), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Do your Accept our Terms and Conditions?"""'}), "(default=False, verbose_name=\n 'Do your Accept our Terms and Conditions?')\n", (8375, 8452), False, 'from django.db import migrations, models\n'), ((8477, 8592), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('pending', 
'Pending'), ('approved', 'Approved')]", 'default': '"""pending"""', 'max_length': '(100)'}), "(choices=[('pending', 'Pending'), ('approved', 'Approved')],\n default='pending', max_length=100)\n", (8493, 8592), False, 'from django.db import migrations, models\n'), ((8956, 9052), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (8973, 9052), False, 'from django.db import migrations, models\n'), ((5778, 5798), 'django.db.models.SET', 'models.SET', (['"""others"""'], {}), "('others')\n", (5788, 5798), False, 'from django.db import migrations, models\n'), ((8648, 8668), 'django.db.models.SET', 'models.SET', (['"""others"""'], {}), "('others')\n", (8658, 8668), False, 'from django.db import migrations, models\n'), ((8815, 8835), 'django.db.models.SET', 'models.SET', (['"""others"""'], {}), "('others')\n", (8825, 8835), False, 'from django.db import migrations, models\n')] |
import pytest
import gen
from dcos_installer import cli
def test_default_arg_parser():
parser = cli.get_argument_parser().parse_args([])
assert parser.verbose is False
assert parser.port == 9000
assert parser.action == 'genconf'
def test_set_arg_parser():
argument_parser = cli.get_argument_parser()
def parse_args(arg_list):
return argument_parser.parse_args(arg_list)
parser = parse_args(['-v', '-p 12345'])
assert parser.verbose is True
assert parser.port == 12345
parser = parse_args(['--web'])
assert parser.action == 'web'
parser = parse_args(['--genconf'])
assert parser.action == 'genconf'
parser = parse_args(['--preflight'])
assert parser.action == 'preflight'
parser = parse_args(['--postflight'])
assert parser.action == 'postflight'
parser = parse_args(['--deploy'])
assert parser.action == 'deploy'
parser = parse_args(['--validate-config'])
assert parser.action == 'validate-config'
parser = parse_args(['--hash-password', 'foo'])
assert parser.password == '<PASSWORD>'
assert parser.action == 'hash-password'
parser = parse_args(['--hash-password'])
assert parser.password is None
assert parser.action == 'hash-password'
parser = parse_args(['--set-superuser-password', 'foo'])
assert parser.password == '<PASSWORD>'
assert parser.action == 'set-superuser-password'
parser = parse_args(['--set-superuser-password'])
assert parser.password is None
assert parser.action == 'set-superuser-password'
parser = parse_args(['--generate-node-upgrade-script', 'fake'])
assert parser.installed_cluster_version == 'fake'
assert parser.action == 'generate-node-upgrade-script'
# Can't do two at once
with pytest.raises(SystemExit):
parse_args(['--validate', '--hash-password', 'foo'])
def test_stringify_config():
stringify = gen.stringify_configuration
# Basic cases pass right through
assert dict() == stringify(dict())
assert {"foo": "bar"} == stringify({"foo": "bar"})
assert {"a": "b", "c": "d"} == stringify({"a": "b", "c": "d"})
# booleans are converted to lower case true / false
assert {"a": "true"} == stringify({"a": True})
assert {"a": "false"} == stringify({"a": False})
assert {"a": "b", "c": "false"} == stringify({"a": "b", "c": False})
# integers are made into strings
assert {"a": "1"} == stringify({"a": 1})
assert {"a": "4123"} == stringify({"a": 4123})
assert {"a": "b", "c": "9999"} == stringify({"a": "b", "c": 9999})
# Dict and list are converted to JSON
assert {"a": '["b"]'} == stringify({"a": ['b']})
assert {"a": '["b\\"a"]'} == stringify({"a": ['b"a']})
assert {"a": '[1]'} == stringify({"a": [1]})
assert {"a": '[1, 2, 3, 4]'} == stringify({"a": [1, 2, 3, 4]})
assert {"a": '[true, false]'} == stringify({"a": [True, False]})
assert {"a": '{"b": "c"}'} == stringify({"a": {"b": "c"}})
assert {"a": '{"b": 1}'} == stringify({"a": {"b": 1}})
assert {"a": '{"b": true}'} == stringify({"a": {"b": True}})
assert {"a": '{"b": null}'} == stringify({"a": {"b": None}})
# Random types produce an error.
with pytest.raises(Exception):
stringify({"a": set()})
# All the handled types at once
assert {
"a": "b",
"c": "true",
"d": "1",
"e": "[1]",
"f": '{"g": "h"}'
} == stringify({"a": "b", "c": True, "d": 1, "e": [1], "f": {"g": "h"}})
| [
"pytest.raises",
"dcos_installer.cli.get_argument_parser"
]
| [((299, 324), 'dcos_installer.cli.get_argument_parser', 'cli.get_argument_parser', ([], {}), '()\n', (322, 324), False, 'from dcos_installer import cli\n'), ((1781, 1806), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (1794, 1806), False, 'import pytest\n'), ((3221, 3245), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3234, 3245), False, 'import pytest\n'), ((103, 128), 'dcos_installer.cli.get_argument_parser', 'cli.get_argument_parser', ([], {}), '()\n', (126, 128), False, 'from dcos_installer import cli\n')] |
import hashlib
import unittest
from colicoords.cell import Cell, CellList
from colicoords.preprocess import data_to_cells
from test import testcase
from test.test_functions import load_testdata
class DataTest(testcase.ArrayTestCase):
def setUp(self):
self.data = load_testdata('ds1')
def test_data_slicing(self):
sl1 = self.data[2:5, :, :]
self.assertEqual(sl1.shape, (3, 512, 512))
sl2 = self.data[:, 20:40, 100:200]
self.assertEqual(sl2.shape, (10, 20, 100))
def test_data_copy(self):
m0 = self.data.binary_img.mean()
data_copy = self.data.copy()
self.assertEqual(m0, self.data.binary_img.mean())
data_copy.data_dict['binary'] += 20
self.assertEqual(m0, self.data.binary_img.mean())
self.assertEqual(data_copy.binary_img.mean(), m0 + 20)
def _test_cell_list(self):
#todo check order
print(hashlib.md5(self.data).hexdigest())
cell_list = data_to_cells(self.data, initial_crop=2, cell_frac=0.5, rotate='binary')
print(hashlib.md5(self.data).hexdigest())
cell_list = data_to_cells(self.data, initial_crop=2, cell_frac=0.5, rotate='binary')
print(hashlib.md5(self.data).hexdigest())
d = self.data.copy()
print(d == self.data)
cl = CellList(cell_list)
self.assertEqual(len(cl), 48)
c5 = cl[5]
self.assertIsInstance(c5, Cell)
del cl[5]
self.assertEqual(len(cl), 47)
self.assertTrue(cl[3] in cl)
cl.append(c5)
self.assertTrue(c5 in cl)
vol = cl.volume
self.assertEqual(len(vol), 48)
class CellListTest(testcase.ArrayTestCase):
def setUp(self):
data = load_testdata('ds1')
self.cell_list = data_to_cells(data)
def test_slicing(self):
sliced = self.cell_list[:5]
self.assertIsInstance(sliced, CellList)
if __name__ == '__main__':
    unittest.main()
 | [
"test.test_functions.load_testdata",
"hashlib.md5",
"colicoords.preprocess.data_to_cells",
"colicoords.cell.CellList",
"unittest.main"
]
| [((2004, 2019), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2017, 2019), False, 'import unittest\n'), ((287, 307), 'test.test_functions.load_testdata', 'load_testdata', (['"""ds1"""'], {}), "('ds1')\n", (300, 307), False, 'from test.test_functions import load_testdata\n'), ((1005, 1077), 'colicoords.preprocess.data_to_cells', 'data_to_cells', (['self.data'], {'initial_crop': '(2)', 'cell_frac': '(0.5)', 'rotate': '"""binary"""'}), "(self.data, initial_crop=2, cell_frac=0.5, rotate='binary')\n", (1018, 1077), False, 'from colicoords.preprocess import data_to_cells\n'), ((1150, 1222), 'colicoords.preprocess.data_to_cells', 'data_to_cells', (['self.data'], {'initial_crop': '(2)', 'cell_frac': '(0.5)', 'rotate': '"""binary"""'}), "(self.data, initial_crop=2, cell_frac=0.5, rotate='binary')\n", (1163, 1222), False, 'from colicoords.preprocess import data_to_cells\n'), ((1353, 1372), 'colicoords.cell.CellList', 'CellList', (['cell_list'], {}), '(cell_list)\n', (1361, 1372), False, 'from colicoords.cell import Cell, CellList\n'), ((1783, 1803), 'test.test_functions.load_testdata', 'load_testdata', (['"""ds1"""'], {}), "('ds1')\n", (1796, 1803), False, 'from test.test_functions import load_testdata\n'), ((1830, 1849), 'colicoords.preprocess.data_to_cells', 'data_to_cells', (['data'], {}), '(data)\n', (1843, 1849), False, 'from colicoords.preprocess import data_to_cells\n'), ((948, 970), 'hashlib.md5', 'hashlib.md5', (['self.data'], {}), '(self.data)\n', (959, 970), False, 'import hashlib\n'), ((1093, 1115), 'hashlib.md5', 'hashlib.md5', (['self.data'], {}), '(self.data)\n', (1104, 1115), False, 'import hashlib\n'), ((1238, 1260), 'hashlib.md5', 'hashlib.md5', (['self.data'], {}), '(self.data)\n', (1249, 1260), False, 'import hashlib\n')] |
# --------------------------------------------------------------------------- #
# Importing section
# --------------------------------------------------------------------------- #
import os
import sys
import argparse
import logging
import json
from classes.alerts import SlackClient
from influxdb import InfluxDBClient
from classes.data_manager import DataManager
# --------------------------------------------------------------------------- #
# Functions
# -----------------------------------------------------------------------------#
def slack_msg():
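    """Send a Slack message reporting whether the raw data files were handled correctly."""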
slack_client = SlackClient(logger, cfg)
if bool(dm.files_not_correctly_handled):
str_err = ''
for k in dm.files_not_correctly_handled:
str_err = '%sFailed handling of file %s; Exception: %s\n' % (str_err, k, dm.files_not_correctly_handled[k])
slack_client.send_alert_message('OZONE FORECASTER - RAW FILES ALARM:\n%s' % str_err, '#ff0000')
else:
slack_client.send_alert_message('OZONE FORECASTER - RAW FILES PROPERLY HANDLED', '#00ff00')
# --------------------------------------------------------------------------- #
# Main
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
# --------------------------------------------------------------------------- #
# Configuration file
# --------------------------------------------------------------------------- #
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-c", help="configuration file")
arg_parser.add_argument("-l", help="log file (optional, if empty log redirected on stdout)")
args = arg_parser.parse_args()
config_file = args.c
if os.path.isfile(config_file) is False:
print('\nATTENTION! Unable to open configuration file %s\n' % config_file)
sys.exit(1)
cfg = json.loads(open(args.c).read())
conns_cfg = json.loads(open(cfg['connectionsFile']).read())
cfg.update(conns_cfg)
# --------------------------------------------------------------------------- #
# Set logging object
# --------------------------------------------------------------------------- #
if not args.l:
log_file = None
else:
log_file = args.l
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)-15s::%(levelname)s::%(funcName)s::%(message)s', level=logging.INFO,
filename=log_file)
# --------------------------------------------------------------------------- #
# Starting program
# --------------------------------------------------------------------------- #
logger.info("Starting program")
# --------------------------------------------------------------------------- #
# InfluxDB connection
# --------------------------------------------------------------------------- #
logger.info('Connection to InfluxDb server on socket [%s:%s]' % (cfg['influxDB']['host'], cfg['influxDB']['port']))
try:
influx_client = InfluxDBClient(host=cfg['influxDB']['host'], port=cfg['influxDB']['port'],
password=cfg['influxDB']['password'], username=cfg['influxDB']['user'],
database=cfg['influxDB']['database'], ssl=cfg['influxDB']['ssl'])
except Exception as e:
logger.error('EXCEPTION: %s' % str(e))
sys.exit(3)
logger.info('Connection successful')
dm = DataManager(influx_client, cfg, logger)
# Download files from the FTP server
if cfg['ftp']['enabled'] is True:
logger.info('Download data from FTP server')
dm.open_ftp_connection()
dm.download_remote_files()
# Insert data into InfluxDB
if cfg['influxDB']['dataImporting'] is True:
logger.info('Importing in InfluxDB of raw data related to files in %s' % cfg['ftp']['localFolders']['tmp'])
dm.insert_data()
# Delete files correctly handled on the FTP server and close the FTP connection
if cfg['ftp']['enabled'] is True:
if cfg['ftp']['deleteRemoteFile'] is True:
logger.info('Delete handled files from FTP server')
dm.delete_remote_files()
dm.close_ftp_connection()
# Slack alert
if cfg['alerts']['slack']['enabled'] is True:
slack_msg()
logger.info("Ending program")
| [
"logging.getLogger",
"logging.basicConfig",
"influxdb.InfluxDBClient",
"classes.data_manager.DataManager",
"argparse.ArgumentParser",
"classes.alerts.SlackClient",
"os.path.isfile",
"sys.exit"
]
| [((578, 602), 'classes.alerts.SlackClient', 'SlackClient', (['logger', 'cfg'], {}), '(logger, cfg)\n', (589, 602), False, 'from classes.alerts import SlackClient\n'), ((1457, 1482), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1480, 1482), False, 'import argparse\n'), ((2270, 2289), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2287, 2289), False, 'import logging\n'), ((2294, 2429), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)-15s::%(levelname)s::%(funcName)s::%(message)s"""', 'level': 'logging.INFO', 'filename': 'log_file'}), "(format=\n '%(asctime)-15s::%(levelname)s::%(funcName)s::%(message)s', level=\n logging.INFO, filename=log_file)\n", (2313, 2429), False, 'import logging\n'), ((3456, 3495), 'classes.data_manager.DataManager', 'DataManager', (['influx_client', 'cfg', 'logger'], {}), '(influx_client, cfg, logger)\n', (3467, 3495), False, 'from classes.data_manager import DataManager\n'), ((1709, 1736), 'os.path.isfile', 'os.path.isfile', (['config_file'], {}), '(config_file)\n', (1723, 1736), False, 'import os\n'), ((1838, 1849), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1846, 1849), False, 'import sys\n'), ((3020, 3240), 'influxdb.InfluxDBClient', 'InfluxDBClient', ([], {'host': "cfg['influxDB']['host']", 'port': "cfg['influxDB']['port']", 'password': "cfg['influxDB']['password']", 'username': "cfg['influxDB']['user']", 'database': "cfg['influxDB']['database']", 'ssl': "cfg['influxDB']['ssl']"}), "(host=cfg['influxDB']['host'], port=cfg['influxDB']['port'],\n password=cfg['influxDB']['password'], username=cfg['influxDB']['user'],\n database=cfg['influxDB']['database'], ssl=cfg['influxDB']['ssl'])\n", (3034, 3240), False, 'from influxdb import InfluxDBClient\n'), ((3393, 3404), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (3401, 3404), False, 'import sys\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import re
DEBUG = False
def merge_str_literal(text: str) -> str:
def _on_match(m: re.Match):
return m.group().replace('"+"', '')
return re.sub(r'".+?"(\+".+?")+ ', _on_match, text)
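# A minimal illustration of merge_str_literal (assumed input, not from the original script):
#   merge_str_literal('call Foo("a"+"b"+"c" )')  ->  'call Foo("abc" )'
# i.e. runs of quoted literals joined by '+' (and followed by a space) are merged.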
lines = """
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
""".strip().splitlines()
stack = []
items = []
for line in lines:
if line.startswith('globals'):
stack.append('globals')
elif line.startswith('endglobals'):
stack.pop(-1)
stack.append('endglobals')
elif line.startswith('function'):
stack.append('function')
elif line.startswith('endfunction'):
stack.pop(-1)
stack.append('endfunction')
elif line.startswith('loop'):
stack.append('loop')
elif line.startswith('endloop'):
stack.pop(-1)
stack.append('endloop')
elif line.startswith('if'):
stack.append('if')
elif line.startswith('elseif'):
stack.pop(-1)
stack.append('elseif')
elif line.startswith('else'):
stack.pop(-1)
stack.append('else')
elif line.startswith('endif'):
stack.pop(-1)
stack.append('endif')
else:
stack.append(line[:8] + '...')
indent = len(stack) - 1
line = merge_str_literal(line)
items.append(' ' * indent + line)
DEBUG and print(f'{indent}. {line!r}', stack)
# Add empty line after endglobals and endfunction
if line.startswith('endglobals') or line.startswith('endfunction'):
items.append('')
if stack[-1] not in ['globals', 'function', 'loop', 'if', 'elseif', 'else']:
stack.pop(-1)
new_text = '\n'.join(items).strip()
print(new_text)
"""
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
"""
| [
"re.sub"
]
| [((231, 275), 're.sub', 're.sub', (['"""".+?"(\\\\+".+?")+ """', '_on_match', 'text'], {}), '(\'".+?"(\\\\+".+?")+ \', _on_match, text)\n', (237, 275), False, 'import re\n')] |
from common.commons import *
DATA_PATH = os.environ["DATA_PATH"]
def core():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
# pattern[root+'/'+size+'/'+cluster]= root +'/' +size +'/'+ members[0]
pattern[root+'/'+size+'/'+cluster]= members[0]
pattern
from pairs import shapePairs
matches = shapePairs()
# 'FFmpeg','curl','nginx','openssl','redis','tmux','vlc']
matches = matches[matches.file.apply(lambda x: x in list(pattern.values()) or not ( x.startswith('linux_') or x.startswith('FFmpeg_') or x.startswith('curl_') or x.startswith('nginx_') or x.startswith('openssl_') or x.startswith('redis_') or x.startswith('tmux_') or x.startswith('vlc_')))]
from pairs import createPairs
createPairs(matches)
# # # elif job == 'importShapesPairs':
from pairs import importShape
importShape()
def checkWrongMembers():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
sizeDict = {}
for s in [(i,os.path.getsize(join(clusterPath, root, size, cluster,i))) for i in members]:
sizeDict[s[1]] = s[0]
sizeDict
if len(sizeDict) > 1:
print(join(clusterPath, root, size, cluster))
print(sizeDict.values())
def cluster():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
# pattern[root+'/'+size+'/'+cluster]= root +'/' +size +'/'+ members[0]
pattern[root+'/'+size+'/'+cluster]= members[0]
pattern
pairsPath = join(DATA_PATH, 'pairs')
from abstractPatch import loadPairMulti
for root in roots:
matches =loadPairMulti(root,'','shapes')
matches
sizes = matches['sizes'].unique().tolist()
for s in sizes:
match = matches[matches['sizes'] == s]
match
clusterCore(pattern,clusterPath, 'shapes', match, pairsPath, root, s, '')
def clusterCore(pattern,clusterPath, level, match, pairsPath, root, s,action ,token=''):
col_combi = match.tuples.values.tolist()
import networkx
g = networkx.Graph(col_combi)
cluster = []
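    # Note (editor): connected_component_subgraphs() was removed in NetworkX 2.4;
    # on newer versions the equivalent is (g.subgraph(c) for c in networkx.connected_components(g)).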
for subgraph in networkx.connected_component_subgraphs(g):
logging.info('Cluster size %d',len(subgraph.nodes()))
cluster.append(subgraph.nodes())
cluster
pathMapping = dict()
if level == 'actions':
indexFile = join(pairsPath, root, s,action+'.index')
elif level == 'shapes':
indexFile = join(pairsPath, root, s + '.index')
else:
indexFile =join(pairsPath, root, s,action,token+'.index')
df = pd.read_csv(indexFile, header=None, usecols=[0, 1], index_col=[0])
pathMapping = df.to_dict()
workList = []
exportCLusters ={}
if not os.path.exists(join(clusterPath, root, s)):
print()
existingClusters = 0
else:
existingClusters = len(listdir(join(clusterPath, root, s)))
for clus in cluster:
members = [pathMapping[1][int(i)] for i in clus]
members
potentialClusters = [(key, value) for key, value in pattern.items() if key.startswith(root + '/' + s)]
potentialClusters
foundExisting = False
for pc,pcMember in potentialClusters:
if pcMember in members:
pc
foundExisting = True
exportCLusters[pc.split('/')[-1]] = members
if not foundExisting:
exportCLusters[existingClusters] = members
existingClusters= existingClusters+1
exportCLusters
for k,v in exportCLusters.items():
for f in v:
t = f, root, level, clusterPath, s, action, token, k
workList.append(t)
# for idx, clus in enumerate(cluster):
# logging.info('exporting cluster %s %s %s %d', root,s,action,idx)
# for f in clus:
# dumpFile = pathMapping[1][int(f)]
#
# t = dumpFile,root,level,clusterPath,s,action,token,idx
# workList.append(t)
from abstractPatch import dumpFilesCore
parallelRun(dumpFilesCore,workList)
# for wl in workList:
# dumpFilesCore(wl)
| [
"networkx.connected_component_subgraphs",
"abstractPatch.loadPairMulti",
"networkx.Graph",
"pairs.shapePairs",
"pairs.createPairs",
"pairs.importShape"
]
| [((850, 862), 'pairs.shapePairs', 'shapePairs', ([], {}), '()\n', (860, 862), False, 'from pairs import shapePairs\n'), ((1263, 1283), 'pairs.createPairs', 'createPairs', (['matches'], {}), '(matches)\n', (1274, 1283), False, 'from pairs import createPairs\n'), ((1365, 1378), 'pairs.importShape', 'importShape', ([], {}), '()\n', (1376, 1378), False, 'from pairs import importShape\n'), ((3625, 3650), 'networkx.Graph', 'networkx.Graph', (['col_combi'], {}), '(col_combi)\n', (3639, 3650), False, 'import networkx\n'), ((3688, 3729), 'networkx.connected_component_subgraphs', 'networkx.connected_component_subgraphs', (['g'], {}), '(g)\n', (3726, 3729), False, 'import networkx\n'), ((3184, 3217), 'abstractPatch.loadPairMulti', 'loadPairMulti', (['root', '""""""', '"""shapes"""'], {}), "(root, '', 'shapes')\n", (3197, 3217), False, 'from abstractPatch import loadPairMulti\n')] |
# -*- coding: utf-8 -*-
import os
import sys
import tensorflow as tf
import numpy as np
import data_utils
from translate import Transliteration
from flask import Flask, request, jsonify
transliteration = Transliteration()
app = Flask(__name__)  # Create the Flask object; the application package name is passed as the parameter.
app.config['JSON_AS_ASCII'] = False  # Allow Hangul (non-ASCII) data to be returned without ASCII escaping.
@app.route("/transliterate", methods=['GET'])
def transliterate():
input = request.args.get('input')
output = transliteration.run(input)
learned = transliteration.is_learned(input)
print(input, learned)
return jsonify(output)
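# Example request (assumed host/port from app.run below):
#   GET http://localhost/transliterate?input=hello  ->  JSON-encoded transliteration result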
if __name__ == "__main__":
app.run(debug = True, host='0.0.0.0', port=80, use_reloader=False)
| [
"flask.jsonify",
"flask.request.args.get",
"flask.Flask",
"translate.Transliteration"
]
| [((205, 222), 'translate.Transliteration', 'Transliteration', ([], {}), '()\n', (220, 222), False, 'from translate import Transliteration\n'), ((230, 245), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (235, 245), False, 'from flask import Flask, request, jsonify\n'), ((431, 456), 'flask.request.args.get', 'request.args.get', (['"""input"""'], {}), "('input')\n", (447, 456), False, 'from flask import Flask, request, jsonify\n'), ((577, 592), 'flask.jsonify', 'jsonify', (['output'], {}), '(output)\n', (584, 592), False, 'from flask import Flask, request, jsonify\n')] |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import datetime
import logging
import os
import sys
import statistics
from volttron.platform.vip.agent import Agent, RPC, Core
from volttron.platform.agent import utils
from volttron.platform.agent.utils import get_aware_utc_now
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '1.0'
def log_statistics(config_path, **kwargs):
"""Load the LogStatisticsAgent agent configuration and returns and instance
of the agent created using that configuration.
:param config_path: Path to a configuration file.
:type config_path: str
:returns: LogStatisticsAgent agent instance
:rtype: LogStatisticsAgent agent
"""
config = utils.load_config(config_path)
return LogStatisticsAgent(config, **kwargs)
class LogStatisticsAgent(Agent):
"""
    LogStatisticsAgent reads the volttron.log file size every hour,
    computes the size delta from the previous hour and publishes the difference
    with a timestamp. It also publishes the standard deviation every 24 hours.
:param config: Configuration dict
:type config: dict
Example configuration:
.. code-block:: python
{
"file_path" : "/home/volttron/volttron.log",
"analysis_interval_sec" : 60,
"publish_topic" : "platform/log_statistics",
"historian_topic" : "analysis/log_statistics"
}
"""
def __init__(self, config, **kwargs):
super(LogStatisticsAgent, self).__init__(**kwargs)
self.analysis_interval_sec = config["analysis_interval_sec"]
self.file_path = config["file_path"]
self.publish_topic = config["publish_topic"]
self.historian_topic = config["historian_topic"]
self.size_delta_list = []
self.file_start_size = None
self.prev_file_size = None
self._scheduled_event = None
@Core.receiver('onstart')
def starting(self, sender, **kwargs):
_log.info("Starting " + self.__class__.__name__ + " agent")
self.publish_analysis()
def publish_analysis(self):
"""
        Publishes the file's size increment over the previous time interval (60 minutes)
        with a timestamp.
        Also publishes the standard deviation of the file's hourly size differences
        every 24 hours.
"""
if self._scheduled_event is not None:
self._scheduled_event.cancel()
if self.prev_file_size is None:
self.prev_file_size = self.get_file_size()
_log.debug("init_file_size = {}".format(self.prev_file_size))
else:
# read file size
curr_file_size = self.get_file_size()
# calculate size delta
size_delta = curr_file_size - self.prev_file_size
self.prev_file_size = curr_file_size
self.size_delta_list.append(size_delta)
headers = {'Date': datetime.datetime.utcnow().isoformat() + 'Z'}
publish_message = {'timestamp': datetime.datetime.utcnow().isoformat() + 'Z',
'log_size_delta': size_delta}
historian_message = [{"log_size_delta ": size_delta},
{"log_size_delta ": {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}}]
if len(self.size_delta_list) == 24:
standard_deviation = statistics.stdev(self.size_delta_list)
publish_message['log_std_dev'] = standard_deviation
historian_message[0]['log_std_dev'] = standard_deviation
historian_message[1]['log_std_dev'] = {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}
_log.debug('publishing message {} with header {} on historian topic {}'
.format(historian_message, headers, self.historian_topic))
self.vip.pubsub.publish(peer="pubsub", topic=self.historian_topic, headers = headers,
message=historian_message)
self.size_delta_list = []
_log.debug('publishing message {} on topic {}'.format(publish_message, self.publish_topic))
self.vip.pubsub.publish(peer="pubsub", topic=self.publish_topic,
message=publish_message)
_log.debug('Scheduling next periodic call')
now = get_aware_utc_now()
next_update_time = now + datetime.timedelta(
seconds=self.analysis_interval_sec)
self._scheduled_event = self.core.schedule(
next_update_time, self.publish_analysis)
def get_file_size(self):
try:
return os.path.getsize(self.file_path)
except OSError as e:
_log.error(e)
def main(argv=sys.argv):
"""Main method called by the platform."""
utils.vip_main(log_statistics, identity='platform.logstatisticsagent')
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| [
"logging.getLogger",
"os.path.getsize",
"statistics.stdev",
"datetime.datetime.utcnow",
"volttron.platform.agent.utils.vip_main",
"volttron.platform.agent.utils.load_config",
"volttron.platform.vip.agent.Core.receiver",
"datetime.timedelta",
"volttron.platform.agent.utils.setup_logging",
"volttron.platform.agent.utils.get_aware_utc_now"
]
| [((2132, 2153), 'volttron.platform.agent.utils.setup_logging', 'utils.setup_logging', ([], {}), '()\n', (2151, 2153), False, 'from volttron.platform.agent import utils\n'), ((2161, 2188), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2178, 2188), False, 'import logging\n'), ((2578, 2608), 'volttron.platform.agent.utils.load_config', 'utils.load_config', (['config_path'], {}), '(config_path)\n', (2595, 2608), False, 'from volttron.platform.agent import utils\n'), ((3702, 3726), 'volttron.platform.vip.agent.Core.receiver', 'Core.receiver', (['"""onstart"""'], {}), "('onstart')\n", (3715, 3726), False, 'from volttron.platform.vip.agent import Agent, RPC, Core\n'), ((6599, 6669), 'volttron.platform.agent.utils.vip_main', 'utils.vip_main', (['log_statistics'], {'identity': '"""platform.logstatisticsagent"""'}), "(log_statistics, identity='platform.logstatisticsagent')\n", (6613, 6669), False, 'from volttron.platform.agent import utils\n'), ((6146, 6165), 'volttron.platform.agent.utils.get_aware_utc_now', 'get_aware_utc_now', ([], {}), '()\n', (6163, 6165), False, 'from volttron.platform.agent.utils import get_aware_utc_now\n'), ((6199, 6253), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'self.analysis_interval_sec'}), '(seconds=self.analysis_interval_sec)\n', (6217, 6253), False, 'import datetime\n'), ((6435, 6466), 'os.path.getsize', 'os.path.getsize', (['self.file_path'], {}), '(self.file_path)\n', (6450, 6466), False, 'import os\n'), ((5166, 5204), 'statistics.stdev', 'statistics.stdev', (['self.size_delta_list'], {}), '(self.size_delta_list)\n', (5182, 5204), False, 'import statistics\n'), ((4712, 4738), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (4736, 4738), False, 'import datetime\n'), ((4803, 4829), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (4827, 4829), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
"""
Global app forms
"""
# Standard Library
import re
# Django Library
from django import forms
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import ugettext_lazy as _
# Thirdparty Library
from dal import autocomplete
# Localfolder Library
from ..models import PyCompany, PyCountry, PyUser
from .partner import PartnerForm
class PerfilForm(forms.ModelForm):
"""Class to update the user profile on the system
"""
class Meta:
model = PyUser
fields = (
'first_name',
'last_name',
'celular',
)
labels = {
'first_name': _('Name'),
'last_name': _('Last Name'),
'celular': _('Mobile Phone'),
}
widgets = {
'first_name': forms.TextInput(attrs={'class': 'form-control'}),
'last_name': forms.TextInput(attrs={'class': 'form-control'}),
'celular': forms.TextInput(attrs={'class': 'form-control'}),
}
class PersonaChangeForm(UserChangeForm):
"""for something will be
"""
class Meta(UserChangeForm.Meta):
model = PyUser
fields = (
'email',
'is_superuser',
'is_staff',
'is_active',
'last_login',
'date_joined',
'first_name',
'last_name',
)
# ========================================================================== #
class PasswordRecoveryForm(forms.ModelForm):
"""To send the account recovery correction
"""
class Meta():
model = PyUser
fields = (
'email',
)
widgets = {
'email': forms.EmailInput(
attrs={'class': 'form-control', 'placeholder': _('Email')}
),
}
# ========================================================================== #
class PasswordSetForm(forms.Form):
"""To send the account recovery correction
"""
password1 = forms.CharField(
widget=forms.PasswordInput(
attrs={'class': 'form-control', 'placeholder': _('Password')}
)
)
password2 = forms.CharField(
widget=forms.PasswordInput(
attrs={'class': 'form-control', 'placeholder': _('Retype password')}
)
)
    def clean(self):
        super().clean()
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 != password2:
            raise forms.ValidationError(
                _('The two password fields didn\'t match.')
            )
class PersonaCreationForm(UserCreationForm):
"""This form class renders the record sheet of
users
"""
class Meta(UserCreationForm.Meta):
model = PyUser
fields = (
'email',
)
widgets = {
'email': forms.EmailInput(
attrs={'class': 'form-control', 'placeholder': _('Email')}
),
}
class AvatarForm(forms.ModelForm):
"""Class to update the user profile on the system
"""
class Meta:
model = PyUser
fields = (
'avatar',
)
class InitForm(forms.ModelForm):
"""From of OMegaERP initializacion
"""
email = forms.EmailField(
widget=forms.EmailInput(
attrs={
'placeholder': _('Admin email')
}
)
)
password = forms.CharField(
max_length=100,
widget=forms.PasswordInput(
attrs={
'placeholder': _('Admin Password')
}
)
)
class Meta:
model = PyCompany
fields = [
'name',
'country',
'email',
'password'
]
labels = {
'name': _('Company Name'),
'country': _('Country'),
'email': _('Admin user email'),
'password': _('Password'),
}
widgets = {
'name': forms.TextInput(
attrs={
'class': 'form-control',
'data-placeholder': _('Company Name'),
'style': 'width: 100%',
},
),
'country': autocomplete.ModelSelect2(
url='PyCountry:autocomplete',
attrs={
'class': 'form-control',
'data-placeholder': _('Select a country...'),
'style': 'width: 100%',
},
),
'email': forms.EmailInput(
attrs={
'class': 'form-control',
'data-placeholder': _('Admin user email'),
'style': 'width: 100%',
},
),
}
class ActivateForm(forms.Form):
"""To activate or deactivate an object in OmegaERP
"""
object_name = forms.CharField(max_length=100, widget=forms.HiddenInput)
object_pk = forms.IntegerField(widget=forms.HiddenInput) | [
"django.utils.translation.ugettext_lazy",
"django.forms.IntegerField",
"django.forms.CharField",
"django.forms.TextInput"
]
| [((5135, 5192), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(100)', 'widget': 'forms.HiddenInput'}), '(max_length=100, widget=forms.HiddenInput)\n', (5150, 5192), False, 'from django import forms\n'), ((5209, 5253), 'django.forms.IntegerField', 'forms.IntegerField', ([], {'widget': 'forms.HiddenInput'}), '(widget=forms.HiddenInput)\n', (5227, 5253), False, 'from django import forms\n'), ((691, 700), 'django.utils.translation.ugettext_lazy', '_', (['"""Name"""'], {}), "('Name')\n", (692, 700), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((727, 741), 'django.utils.translation.ugettext_lazy', '_', (['"""Last Name"""'], {}), "('Last Name')\n", (728, 741), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((766, 783), 'django.utils.translation.ugettext_lazy', '_', (['"""Mobile Phone"""'], {}), "('Mobile Phone')\n", (767, 783), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((841, 889), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (856, 889), False, 'from django import forms\n'), ((916, 964), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (931, 964), False, 'from django import forms\n'), ((989, 1037), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1004, 1037), False, 'from django import forms\n'), ((4040, 4057), 'django.utils.translation.ugettext_lazy', '_', (['"""Company Name"""'], {}), "('Company Name')\n", (4041, 4057), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4082, 4094), 'django.utils.translation.ugettext_lazy', '_', (['"""Country"""'], {}), "('Country')\n", (4083, 4094), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4117, 4138), 'django.utils.translation.ugettext_lazy', '_', (['"""Admin user email"""'], {}), "('Admin user email')\n", (4118, 4138), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4164, 4177), 'django.utils.translation.ugettext_lazy', '_', (['"""Password"""'], {}), "('Password')\n", (4165, 4177), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2622, 2664), 'django.utils.translation.ugettext_lazy', '_', (['"""The two password fields didn\'t match."""'], {}), '("The two password fields didn\'t match.")\n', (2623, 2664), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2772, 2814), 'django.utils.translation.ugettext_lazy', '_', (['"""The two password fields didn\'t match."""'], {}), '("The two password fields didn\'t match.")\n', (2773, 2814), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1814, 1824), 'django.utils.translation.ugettext_lazy', '_', (['"""Email"""'], {}), "('Email')\n", (1815, 1824), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2150, 2163), 'django.utils.translation.ugettext_lazy', '_', (['"""Password"""'], {}), "('Password')\n", (2151, 2163), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2309, 2329), 'django.utils.translation.ugettext_lazy', '_', (['"""Retype password"""'], {}), "('Retype password')\n", (2310, 2329), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3180, 3190), 'django.utils.translation.ugettext_lazy', '_', (['"""Email"""'], {}), "('Email')\n", (3181, 3190), True, 'from django.utils.translation import 
ugettext_lazy as _\n'), ((3602, 3618), 'django.utils.translation.ugettext_lazy', '_', (['"""Admin email"""'], {}), "('Admin email')\n", (3603, 3618), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3792, 3811), 'django.utils.translation.ugettext_lazy', '_', (['"""Admin Password"""'], {}), "('Admin Password')\n", (3793, 3811), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4355, 4372), 'django.utils.translation.ugettext_lazy', '_', (['"""Company Name"""'], {}), "('Company Name')\n", (4356, 4372), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4657, 4681), 'django.utils.translation.ugettext_lazy', '_', (['"""Select a country..."""'], {}), "('Select a country...')\n", (4658, 4681), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4909, 4930), 'django.utils.translation.ugettext_lazy', '_', (['"""Admin user email"""'], {}), "('Admin user email')\n", (4910, 4930), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
from unittest import mock
import pytest
from django.http import HttpRequest
from rest_framework.response import Response
from rest_framework.test import APIClient
from drf_viewset_profiler.middleware import LineProfilerViewSetMiddleware
@pytest.fixture
def api_client():
return APIClient()
@pytest.fixture
def mock_http_request():
http_request = HttpRequest()
http_request.method = "GET"
return http_request
@pytest.fixture
def mock_http_response(mock_http_request):
response = Response()
mock_http_request.line_profiler = mock.Mock()
mock_http_request.parser_context = {"view": mock.Mock()}
response.renderer_context = {"request": mock_http_request}
return response
@pytest.fixture
def mock_output_writer(monkeypatch):
mock_output_writer_ = mock.Mock()
monkeypatch.setattr("drf_viewset_profiler.middleware.output_writer.stream", mock_output_writer_)
return mock_output_writer_
@pytest.fixture
def mock_line_profiler_viewset_middleware():
return LineProfilerViewSetMiddleware()
| [
"unittest.mock.Mock",
"rest_framework.test.APIClient",
"rest_framework.response.Response",
"drf_viewset_profiler.middleware.LineProfilerViewSetMiddleware",
"django.http.HttpRequest"
]
| [((286, 297), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (295, 297), False, 'from rest_framework.test import APIClient\n'), ((360, 373), 'django.http.HttpRequest', 'HttpRequest', ([], {}), '()\n', (371, 373), False, 'from django.http import HttpRequest\n'), ((506, 516), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (514, 516), False, 'from rest_framework.response import Response\n'), ((555, 566), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (564, 566), False, 'from unittest import mock\n'), ((792, 803), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (801, 803), False, 'from unittest import mock\n'), ((1010, 1041), 'drf_viewset_profiler.middleware.LineProfilerViewSetMiddleware', 'LineProfilerViewSetMiddleware', ([], {}), '()\n', (1039, 1041), False, 'from drf_viewset_profiler.middleware import LineProfilerViewSetMiddleware\n'), ((615, 626), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (624, 626), False, 'from unittest import mock\n')] |
# This allows for running the example when the repo has been cloned
import sys
from os.path import abspath
sys.path.extend([abspath(".")])
# Example code follows
import logging
import numpy as np
import matplotlib.pyplot as plt
import muDIC.vlab as vlab
import muDIC as dic
"""
This example runs an experiment where a deformation gradient is used
to deform a synthetically generated speckle image. The speckle is then down-sampled by a factor of four
and sensor artifacts are included.
The analysis is then performed and the resulting deformation gradient field is compared to the
one used to deform the images.
"""
# Set the amount of info printed to terminal during analysis
logging.basicConfig(format='%(name)s:%(levelname)s:%(message)s', level=logging.INFO)
show_results = False
# Define the image you want to analyse
n_imgs = 2
image_shape = (500, 500)
downsample_factor = 4
super_image_shape = tuple(dim * downsample_factor for dim in image_shape)
# Make a speckle image
speckle_image = vlab.rosta_speckle(super_image_shape, dot_size=4, density=0.5, smoothness=2.0)
# Make an image deformed
F = np.array([[1.01,0],[0.01,1.0]])
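# Applied deformation gradient: 1% stretch along x (F[0,0] = 1.01) plus a small
# shear component (F[1,0] = 0.01); the DIC results are compared against this F below.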
image_deformer = vlab.imageDeformer_from_defGrad(F)
# Make an image down-sampler including downscaling, fill-factor and sensor grid irregularities
downsampler = vlab.Downsampler(image_shape=super_image_shape, factor=downsample_factor, fill=.95,
pixel_offset_stddev=0.05)
# Make a noise injector producing 2% gaussian additive noise
noise_injector = vlab.noise_injector("gaussian", sigma=.02)
# Make an synthetic image generation pipeline
image_generator = vlab.SyntheticImageGenerator(speckle_image=speckle_image, image_deformer=image_deformer,
downsampler=downsampler, noise_injector=noise_injector, n=n_imgs)
# Put it into an image stack
image_stack = dic.ImageStack(image_generator)
# Now, make a mesh. Make sure to use enough elements
mesher = dic.Mesher(deg_n=3, deg_e=3,type="spline")
#mesh = mesher.mesh(image_stack) # Use this if you want to use a GUI
mesh = mesher.mesh(image_stack,Xc1=50,Xc2=450,Yc1=50,Yc2=450,n_ely=8,n_elx=8, GUI=False)
# Prepare the analysis input and initiate the analysis
input = dic.DICInput(mesh, image_stack)
input.tol = 1e-6
input.interpolation_order = 4
dic_job = dic.DICAnalysis(input)
results = dic_job.run()
# Calculate the fields for later use. Seed is used when spline elements are used and upscale is used for Q4.
fields = dic.Fields(results, seed=101,upscale=10)
# We will now compare the results from the analysis to the deformation gradient which the image was deformed by
if show_results:
plt.figure()
plt.imshow(F[0,0] - fields.F()[0, 0,0, :, :, 1], cmap=plt.cm.magma)
plt.xlabel("Element e-coordinate")
plt.ylabel("Element n-coordinate")
plt.colorbar()
plt.title("Difference in deformation gradient component 0,0 within the element")
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
#line1 = ax1.plot(res_field[:, 50], label="correct")
line2 = ax1.plot(fields.F()[0, 0,0, :, 50, 1], label="DIC")
ax1.set_xlabel("element e-coordinate")
ax1.set_ylabel("Deformation gradient component 0,0 []")
ax2 = fig1.add_subplot(111, sharex=ax1, frameon=False)
line3 = ax2.plot(F[0,0] - fields.F()[0, 0,0, :, 50, 1], "r--", label="difference")
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.set_ylabel("Deviation []")
plt.title("Deformation gradient component 0,0")
fig1.legend()
plt.show()
| [
"matplotlib.pyplot.ylabel",
"muDIC.vlab.SyntheticImageGenerator",
"numpy.array",
"muDIC.ImageStack",
"muDIC.Mesher",
"matplotlib.pyplot.xlabel",
"muDIC.DICAnalysis",
"matplotlib.pyplot.title",
"muDIC.vlab.rosta_speckle",
"muDIC.DICInput",
"matplotlib.pyplot.show",
"logging.basicConfig",
"matplotlib.pyplot.colorbar",
"muDIC.Fields",
"matplotlib.pyplot.figure",
"muDIC.vlab.imageDeformer_from_defGrad",
"muDIC.vlab.Downsampler",
"muDIC.vlab.noise_injector",
"os.path.abspath"
]
| [((679, 768), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(name)s:%(levelname)s:%(message)s"""', 'level': 'logging.INFO'}), "(format='%(name)s:%(levelname)s:%(message)s', level=\n logging.INFO)\n", (698, 768), False, 'import logging\n'), ((998, 1076), 'muDIC.vlab.rosta_speckle', 'vlab.rosta_speckle', (['super_image_shape'], {'dot_size': '(4)', 'density': '(0.5)', 'smoothness': '(2.0)'}), '(super_image_shape, dot_size=4, density=0.5, smoothness=2.0)\n', (1016, 1076), True, 'import muDIC.vlab as vlab\n'), ((1108, 1142), 'numpy.array', 'np.array', (['[[1.01, 0], [0.01, 1.0]]'], {}), '([[1.01, 0], [0.01, 1.0]])\n', (1116, 1142), True, 'import numpy as np\n'), ((1157, 1191), 'muDIC.vlab.imageDeformer_from_defGrad', 'vlab.imageDeformer_from_defGrad', (['F'], {}), '(F)\n', (1188, 1191), True, 'import muDIC.vlab as vlab\n'), ((1302, 1416), 'muDIC.vlab.Downsampler', 'vlab.Downsampler', ([], {'image_shape': 'super_image_shape', 'factor': 'downsample_factor', 'fill': '(0.95)', 'pixel_offset_stddev': '(0.05)'}), '(image_shape=super_image_shape, factor=downsample_factor,\n fill=0.95, pixel_offset_stddev=0.05)\n', (1318, 1416), True, 'import muDIC.vlab as vlab\n'), ((1522, 1565), 'muDIC.vlab.noise_injector', 'vlab.noise_injector', (['"""gaussian"""'], {'sigma': '(0.02)'}), "('gaussian', sigma=0.02)\n", (1541, 1565), True, 'import muDIC.vlab as vlab\n'), ((1630, 1793), 'muDIC.vlab.SyntheticImageGenerator', 'vlab.SyntheticImageGenerator', ([], {'speckle_image': 'speckle_image', 'image_deformer': 'image_deformer', 'downsampler': 'downsampler', 'noise_injector': 'noise_injector', 'n': 'n_imgs'}), '(speckle_image=speckle_image, image_deformer=\n image_deformer, downsampler=downsampler, noise_injector=noise_injector,\n n=n_imgs)\n', (1658, 1793), True, 'import muDIC.vlab as vlab\n'), ((1875, 1906), 'muDIC.ImageStack', 'dic.ImageStack', (['image_generator'], {}), '(image_generator)\n', (1889, 1906), True, 'import muDIC as dic\n'), ((1970, 2013), 'muDIC.Mesher', 'dic.Mesher', ([], {'deg_n': '(3)', 'deg_e': '(3)', 'type': '"""spline"""'}), "(deg_n=3, deg_e=3, type='spline')\n", (1980, 2013), True, 'import muDIC as dic\n'), ((2236, 2267), 'muDIC.DICInput', 'dic.DICInput', (['mesh', 'image_stack'], {}), '(mesh, image_stack)\n', (2248, 2267), True, 'import muDIC as dic\n'), ((2326, 2348), 'muDIC.DICAnalysis', 'dic.DICAnalysis', (['input'], {}), '(input)\n', (2341, 2348), True, 'import muDIC as dic\n'), ((2492, 2533), 'muDIC.Fields', 'dic.Fields', (['results'], {'seed': '(101)', 'upscale': '(10)'}), '(results, seed=101, upscale=10)\n', (2502, 2533), True, 'import muDIC as dic\n'), ((2668, 2680), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2678, 2680), True, 'import matplotlib.pyplot as plt\n'), ((2757, 2791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Element e-coordinate"""'], {}), "('Element e-coordinate')\n", (2767, 2791), True, 'import matplotlib.pyplot as plt\n'), ((2796, 2830), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Element n-coordinate"""'], {}), "('Element n-coordinate')\n", (2806, 2830), True, 'import matplotlib.pyplot as plt\n'), ((2835, 2849), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2847, 2849), True, 'import matplotlib.pyplot as plt\n'), ((2854, 2939), 'matplotlib.pyplot.title', 'plt.title', (['"""Difference in deformation gradient component 0,0 within the element"""'], {}), "('Difference in deformation gradient component 0,0 within the element'\n )\n", (2863, 2939), True, 'import matplotlib.pyplot as plt\n'), ((2947, 2959), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2957, 2959), True, 'import matplotlib.pyplot as plt\n'), ((3471, 3518), 'matplotlib.pyplot.title', 'plt.title', (['"""Deformation gradient component 0,0"""'], {}), "('Deformation gradient component 0,0')\n", (3480, 3518), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3552), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3550, 3552), True, 'import matplotlib.pyplot as plt\n'), ((124, 136), 'os.path.abspath', 'abspath', (['"""."""'], {}), "('.')\n", (131, 136), False, 'from os.path import abspath\n')] |
import urllib.request
from bs4 import BeautifulSoup
import csv
import requests
import os
import json
import time
import glob
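# Builds IIIF Curation JSON documents: for each manifest in the json/ folder,
# fetch its otherContent annotation lists, collect the annotated canvases as
# curation members, and write the result to the curation/ folder.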
files = glob.glob("/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/json/*.json")
for i in range(len(files)):
file = files[i]
file_id = file.split("/")[-1].replace(".json", "")
opath = "/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/curation/"+file_id+".json"
if not os.path.exists(opath):
fw = open(opath, 'w')
curation_data = {}
curation_uri = "curation:"+file_id+".json"
with open(file) as f:
try:
df = json.load(f)
except:
continue
anno_count = 1
if "sequences" in df:
print(file)
members = []
canvases = df["sequences"][0]["canvases"]
for j in range(len(canvases)):
canvas = canvases[j]
if "otherContent" in canvas:
id = canvas["otherContent"][0]["@id"]
headers = {"content-type": "application/json"}
# time.sleep(0.5)
r = requests.get(id, headers=headers)
data = r.json()
print(id)
resources = data["resources"]
for resource in resources:
member_id = resource["on"]
res = resource["resource"]
chars = res["chars"]
member = {
"@id": member_id,
"@type": "sc:Canvas",
"label": "[Annotation " + str(anno_count) + "]",
"description": chars,
"metadata": [
{
"label": res["@type"],
"value": chars
}
]
}
anno_count += 1
members.append(member)
if len(members) > 0:
label = ""
if "label" in df:
label = df["label"]
curation_data = {
"@context": [
"http://iiif.io/api/presentation/2/context.json",
"http://codh.rois.ac.jp/iiif/curation/1/context.json"
],
"@type": "cr:Curation",
"@id": curation_uri,
"label": "Automatic curation by IIIF Converter",
"selections": [
{
"@id": curation_uri + "/range1",
"@type": "sc:Range",
"label": "Automatic curation by IIIF Converter",
"members": members,
"within": {
"@id": df["@id"],
"@type": "sc:Manifest",
"label": label
}
}
]
}
json.dump(curation_data, fw, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
| [
"os.path.exists",
"requests.get",
"glob.glob",
"json.load",
"json.dump"
]
| [((134, 221), 'glob.glob', 'glob.glob', (['"""/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/json/*.json"""'], {}), "(\n '/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/json/*.json')\n", (143, 221), False, 'import glob\n'), ((434, 455), 'os.path.exists', 'os.path.exists', (['opath'], {}), '(opath)\n', (448, 455), False, 'import os\n'), ((3528, 3630), 'json.dump', 'json.dump', (['curation_data', 'fw'], {'ensure_ascii': '(False)', 'indent': '(4)', 'sort_keys': '(True)', 'separators': "(',', ': ')"}), "(curation_data, fw, ensure_ascii=False, indent=4, sort_keys=True,\n separators=(',', ': '))\n", (3537, 3630), False, 'import json\n'), ((636, 648), 'json.load', 'json.load', (['f'], {}), '(f)\n', (645, 648), False, 'import json\n'), ((1218, 1251), 'requests.get', 'requests.get', (['id'], {'headers': 'headers'}), '(id, headers=headers)\n', (1230, 1251), False, 'import requests\n')] |
import numpy as np
from sawyer.mujoco.tasks.base import ComposableTask
class TransitionTask(ComposableTask):
"""
Task to pick up an object with the robot gripper.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self):
pass
def compute_reward(self, obs, info):
return 0
def is_success(self, obs, info=None, init=None):
raise NotImplementedError
def is_terminate(self, obs, init):
return self.is_success(obs, init=init)
def is_fail(self, obs):
raise NotImplementedError
def reset(self):
pass
@property
def completion_bonus(self):
return self._completion_bonus
class TransitionPickTask(TransitionTask):
"""
Task to pick up an object with the robot gripper.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self,
success_thresh=0.05,
object_lift_target=0.3,
completion_bonus=0):
self._success_thresh = success_thresh
self._obj_lift_target = object_lift_target
self._completion_bonus = completion_bonus
self._t = 0
def is_success(self, obs, info=None, init=None):
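        # NOTE (editor): this unconditional return short-circuits the method, so the
        # distance-based success check below is currently unreachable (kept as-is).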
return True
if init:
self.reset()
goal = obs[11:14] + np.array([0, 0, 0.04])
box_pos = obs[4:7]
d = np.linalg.norm(box_pos - goal, axis=-1)
print("****[pick/is success] box_pos:{}, goal:{}, d:{}".format(box_pos, goal, d))
return d < self._success_thresh
def is_fail(self, obs):
self._t += 1
if self._t >= 1 and not self.is_success(obs):
return True
return False
def reset(self):
self._t = 0
class TransitionPlaceTask(TransitionTask):
"""
Task to place object at a desired location.
"""
def __init__(self,
success_thresh=0.015,
completion_bonus=0):
self._success_thresh = success_thresh
self._completion_bonus = completion_bonus
self._prev_box_pos = None
def is_success(self, obs, info=None, init=None):
if init:
self.reset()
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.03
abs_diff = abs(box_pos - goal)
print("****[place/is success] abs_diff:{}".format(abs_diff))
return ( abs_diff[0] < max_xy_diff and
abs_diff[1] < max_xy_diff and
box_pos[2] < 0.21 )
def is_fail(self, obs):
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.03
abs_diff = abs(box_pos - goal)
if self._prev_box_pos is None:
self._prev_box_pos = box_pos
else:
max_z_diff = 0.009
z_diff = self._prev_box_pos[2] - box_pos[2]
print("****[place/is_fail] z_diff:{}, box_pos_z:{}".format(z_diff, box_pos[2]))
print(self._prev_box_pos[2], box_pos[2])
if abs_diff[0] > max_xy_diff or abs_diff[1] > max_xy_diff or z_diff < max_z_diff:
return True
else:
self._prev_box_pos = box_pos
return False
def reset(self):
self._prev_box_pos = None
class TransitionPickAndPlaceTask(TransitionTask):
"""
Task to pick up an object and place the object at a desired location.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self,
success_thresh=0.01,
completion_bonus=0):
self._success_thresh = success_thresh
self._completion_bonus = completion_bonus
self._prev_box_pos = None
self._picked = False
self._placing = False
def is_success(self, obs, info=None, init=None):
if init:
self.reset()
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.02
abs_diff = abs(box_pos - goal)
print("****[pick&place/is success] abs_diff:{}, box_z:{}".format(abs_diff, box_pos[2]))
return ( abs_diff[0] < max_xy_diff and
abs_diff[1] < max_xy_diff and
box_pos[2] < 0.22 )
def is_fail(self, obs):
box_pos = obs[4:7]
goal = obs[11:14]
abs_diff = abs(box_pos - goal)
max_xy_diff = 0.03
if self._picked:
self._placing = True
print("placing True")
else:
print("placing False")
if self._picked and not self._placing:
print("return True")
return True
self._picked = True
if self._placing:
if self._prev_box_pos is None:
self._prev_box_pos = box_pos
else:
max_z_diff = 0.009
z_diff = self._prev_box_pos[2] - box_pos[2]
print("****[pick&place/is_fail] z_diff:{}, box_pos_z:{}".format(z_diff, box_pos[2]))
print(self._prev_box_pos[2], box_pos[2])
if box_pos[2] < 0.24 and (abs_diff[0] > max_xy_diff or abs_diff[1] > max_xy_diff or z_diff < max_z_diff):
print("return True")
return True
else:
self._prev_box_pos = box_pos
return False
def get_next_primitive(self, obs, prev_primitive):
if prev_primitive == -1:
return 'pick'
return 'place'
def reset(self):
self._picked = False
self._placing = False
self._prev_box_pos = None
| [
"numpy.array",
"numpy.linalg.norm"
]
| [((1431, 1470), 'numpy.linalg.norm', 'np.linalg.norm', (['(box_pos - goal)'], {'axis': '(-1)'}), '(box_pos - goal, axis=-1)\n', (1445, 1470), True, 'import numpy as np\n'), ((1369, 1391), 'numpy.array', 'np.array', (['[0, 0, 0.04]'], {}), '([0, 0, 0.04])\n', (1377, 1391), True, 'import numpy as np\n')] |
import arcade
import os
SPRITE_SCALING = 0.5
SPRITE_NATIVE_SIZE = 128
SPRITE_SIZE = int(SPRITE_NATIVE_SIZE * SPRITE_SCALING)
SCREEN_WIDTH = SPRITE_SIZE * 14
SCREEN_HEIGHT = SPRITE_SIZE * 10
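# With the 128 px sprites scaled by 0.5, each grid cell is 64 px,
# so the window is 14 x 10 cells (896 x 640 px).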
MOVEMENT_SPEED = 5
COIN_SCALE = 0.7
class Room:
"""
This class holds all the information about the
different rooms.
"""
def __init__(self):
# You may want many lists. Lists for coins, monsters, etc.
self.wall_list = None
self.coin_list = None
self.door_list = None
self.smallpotion_list = None
self.bigpotion_list = None
# This holds the background images. If you don't want changing
# background images, you can delete this part.
self.background = None
self.score = 0
def setup_room_1():
"""
Create and return room 1.
If your program gets large, you may want to separate this into different
files.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.wall_list = arcade.SpriteList()
room.door_list = arcade.SpriteList()
room.coin_list = arcade.SpriteList()
room.smallpotion_list = arcade.SpriteList()
room.bigpotion_list = arcade.SpriteList()
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up on the right side
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0:
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
if not (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0:
door = arcade.Sprite("fence.png", SPRITE_SCALING)
door.left = x
door.bottom = y
room.door_list.append(door)
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 5 * SPRITE_SIZE
room.wall_list.append(wall)
# If you want coins or monsters in a level, then add that code here.
# Load the background image for this level.
room.background = arcade.load_texture("g.png")
for i in range(300,600,75):
coin = arcade.Sprite("coin.png",COIN_SCALE)
coin.center_x = i
coin.center_y = 500
room.coin_list.append(coin)
smallpotion = arcade.Sprite("big.png",0.05)
smallpotion.center_x = 100
smallpotion.center_y = 900
room.smallpotion_list.append(smallpotion)
return room
def setup_room_2():
"""
Create and return room 2.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.door_list = arcade.SpriteList()
room.wall_list = arcade.SpriteList()
room.coin_list = arcade.SpriteList()
room.smallpotion_list = arcade.SpriteList()
room.bigpotion_list = arcade.SpriteList()
# -- Set up the walls
# Create bottom and top row of boxes
# This y loops a list of two, the coordinate 0, and just under the top of window
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x != 0:
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
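    # Interior maze walls for room 2, placed one sprite at a time; a coordinate
    # list plus a loop would be more compact, but the explicit layout is kept as-is.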
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 1 * SPRITE_SIZE
wall.bottom = 6 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 1 * SPRITE_SIZE
wall.bottom = 3 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 3 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 3 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 5 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 2.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
    wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 0.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 9 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 2.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 9 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 7.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 8 * SPRITE_SIZE
room.wall_list.append(wall)
room.background = arcade.load_texture("g.png")
bigpotion = arcade.Sprite("small.png",0.05)
bigpotion.center_x = 800
bigpotion.center_y = 100
room.bigpotion_list.append(bigpotion)
return room
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height):
"""
Initializer
"""
super().__init__(width, height,"Tocate el pnnywise")
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Sprite lists
self.current_room = 0
# Set up the player
self.game_over = False
self.door_list = None
self.rooms = None
self.score = 0
self.coin_list = None
self.player_sprite = None
self.physics_engine = None
self.smallpotion_list = None
self.bigpotion_list = None
def setup(self):
""" Set up the game and initialize the variables. """
# Set up the player
self.player_sprite = arcade.AnimatedWalkingSprite()
self.score = 0
self.coin_list = arcade.SpriteList()
self.smallpotion_list = arcade.SpriteList()
self.bigpotion_list = arcade.SpriteList()
self.player_sprite.center_x = 100
self.player_sprite.center_y = 150
character_scale = 0.75
self.player_sprite.stand_right_textures = []
self.player_sprite.stand_right_textures.append(arcade.load_texture("zombie_stand.png",
scale=character_scale))
self.player_sprite.stand_left_textures = []
self.player_sprite.stand_left_textures.append(arcade.load_texture("zombie_stand.png",
scale=character_scale, mirrored=True))
self.player_sprite.walk_right_textures = []
self.player_sprite.walk_right_textures.append(arcade.load_texture("zombie_walk1.png",
scale=character_scale))
self.player_sprite.walk_right_textures.append(arcade.load_texture("zombie_walk2.png",
scale=character_scale))
self.player_sprite.walk_left_textures = []
self.player_sprite.walk_left_textures.append(arcade.load_texture("zombie_walk1.png",
scale=character_scale, mirrored=True))
self.player_sprite.walk_left_textures.append(arcade.load_texture("zombie_walk2.png",
scale=character_scale, mirrored=True))
# Our list of rooms
self.rooms = []
# Create the rooms. Extend the pattern for each room.
room = setup_room_1()
self.rooms.append(room)
room = setup_room_2()
self.rooms.append(room)
# Our starting room number
self.current_room = 0
# Create a physics engine for this room
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list)
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].door_list)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw the background texture
arcade.draw_texture_rectangle(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2,
SCREEN_WIDTH, SCREEN_HEIGHT, self.rooms[self.current_room].background)
# Draw all the walls in this room
self.rooms[self.current_room].door_list.draw()
self.rooms[self.current_room].wall_list.draw()
self.rooms[self.current_room].coin_list.draw()
self.rooms[self.current_room].bigpotion_list.draw()
self.rooms[self.current_room].smallpotion_list.draw()
# If you have coins or monsters, then copy and modify the line
# above for each list.
output = "Score: {}".format(self.score)
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
self.player_sprite.draw()
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed. """
if key == arcade.key.W:
self.player_sprite.change_y = MOVEMENT_SPEED
elif key == arcade.key.S:
self.player_sprite.change_y = -MOVEMENT_SPEED
elif key == arcade.key.A:
self.player_sprite.change_x = -MOVEMENT_SPEED
elif key == arcade.key.D:
self.player_sprite.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""Called when the user releases a key. """
if key == arcade.key.W or key == arcade.key.S:
self.player_sprite.change_y = 0
elif key == arcade.key.A or key == arcade.key.D:
self.player_sprite.change_x = 0
def update(self, delta_time):
""" Movement and game logic """
self.player_sprite.update_animation()
# Call update on all sprites (The sprites don't do much in this
# example though.)
self.physics_engine.update()
# Do some logic here to figure out what room we are in, and if we need to go
# to a different room.
if self.player_sprite.center_x > SCREEN_WIDTH and self.current_room == 0:
self.current_room = 1
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,
self.rooms[self.current_room].wall_list)
self.player_sprite.center_x = 0
elif self.player_sprite.center_x < 0 and self.current_room == 1:
self.current_room = 0
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,
self.rooms[self.current_room].wall_list)
self.player_sprite.center_x = SCREEN_WIDTH
hit_list = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].coin_list)
hit_list2 = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].bigpotion_list)
hit_list3 = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].smallpotion_list)
for coin in hit_list:
coin.kill()
self.score += 1
my_sound = arcade.load_sound("coinsound.wav")
arcade.play_sound(my_sound)
if self.score == 4:
for i in self.rooms[self.current_room].door_list:
i.kill()
your_sound = arcade.load_sound("door.wav")
arcade.play_sound(your_sound)
for smallpotion in hit_list3:
smallpotion.kill()
self.player_sprite.scale=0.5
tu_sound = arcade.load_sound("shrink.wav")
arcade.play_sound(tu_sound)
for bigpotion in hit_list2:
bigpotion.kill()
self.player_sprite.scale=1
yo_sound = arcade.load_sound("grow.wav")
arcade.play_sound(yo_sound)
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT)
window.setup()
arcade.run()
if __name__ == "__main__":
main() | [
"arcade.draw_text",
"arcade.draw_texture_rectangle",
"arcade.check_for_collision_with_list",
"arcade.load_texture",
"arcade.start_render",
"arcade.load_sound",
"os.chdir",
"arcade.PhysicsEngineSimple",
"arcade.AnimatedWalkingSprite",
"arcade.run",
"os.path.abspath",
"arcade.SpriteList",
"arcade.Sprite",
"arcade.play_sound"
]
| [((1087, 1106), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (1104, 1106), False, 'import arcade\n'), ((1129, 1148), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (1146, 1148), False, 'import arcade\n'), ((1171, 1190), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (1188, 1190), False, 'import arcade\n'), ((1225, 1244), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (1242, 1244), False, 'import arcade\n'), ((1272, 1291), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (1289, 1291), False, 'import arcade\n'), ((2599, 2647), 'arcade.Sprite', 'arcade.Sprite', (['"""gravel_dirt.png"""', 'SPRITE_SCALING'], {}), "('gravel_dirt.png', SPRITE_SCALING)\n", (2612, 2647), False, 'import arcade\n'), ((2903, 2931), 'arcade.load_texture', 'arcade.load_texture', (['"""g.png"""'], {}), "('g.png')\n", (2922, 2931), False, 'import arcade\n'), ((3136, 3166), 'arcade.Sprite', 'arcade.Sprite', (['"""big.png"""', '(0.05)'], {}), "('big.png', 0.05)\n", (3149, 3166), False, 'import arcade\n'), ((3494, 3513), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (3511, 3513), False, 'import arcade\n'), ((3536, 3555), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (3553, 3555), False, 'import arcade\n'), ((3578, 3597), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (3595, 3597), False, 'import arcade\n'), ((3627, 3646), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (3644, 3646), False, 'import arcade\n'), ((3674, 3693), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (3691, 3693), False, 'import arcade\n'), ((4698, 4745), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (4711, 4745), False, 'import arcade\n'), ((4861, 4908), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (4874, 4908), False, 'import arcade\n'), ((5024, 5071), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (5037, 5071), False, 'import arcade\n'), ((5189, 5236), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (5202, 5236), False, 'import arcade\n'), ((5354, 5401), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (5367, 5401), False, 'import arcade\n'), ((5523, 5570), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (5536, 5570), False, 'import arcade\n'), ((5688, 5735), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (5701, 5735), False, 'import arcade\n'), ((5853, 5900), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (5866, 5900), False, 'import arcade\n'), ((6018, 6065), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (6031, 6065), False, 'import arcade\n'), ((6183, 6230), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (6196, 6230), False, 'import arcade\n'), ((6348, 6395), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', 
SPRITE_SCALING)\n", (6361, 6395), False, 'import arcade\n'), ((6513, 6560), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (6526, 6560), False, 'import arcade\n'), ((6678, 6725), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (6691, 6725), False, 'import arcade\n'), ((6843, 6890), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (6856, 6890), False, 'import arcade\n'), ((7008, 7055), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (7021, 7055), False, 'import arcade\n'), ((7173, 7220), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (7186, 7220), False, 'import arcade\n'), ((7338, 7385), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (7351, 7385), False, 'import arcade\n'), ((7502, 7549), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (7515, 7549), False, 'import arcade\n'), ((7667, 7714), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (7680, 7714), False, 'import arcade\n'), ((7832, 7879), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (7845, 7879), False, 'import arcade\n'), ((7999, 8046), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (8012, 8046), False, 'import arcade\n'), ((8164, 8211), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (8177, 8211), False, 'import arcade\n'), ((8329, 8376), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (8342, 8376), False, 'import arcade\n'), ((8494, 8541), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (8507, 8541), False, 'import arcade\n'), ((8659, 8706), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (8672, 8706), False, 'import arcade\n'), ((8824, 8871), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (8837, 8871), False, 'import arcade\n'), ((8990, 9037), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (9003, 9037), False, 'import arcade\n'), ((9156, 9203), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (9169, 9203), False, 'import arcade\n'), ((9322, 9369), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (9335, 9369), False, 'import arcade\n'), ((9488, 9535), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (9501, 9535), False, 'import arcade\n'), ((9653, 9700), 'arcade.Sprite', 'arcade.Sprite', 
(['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (9666, 9700), False, 'import arcade\n'), ((9819, 9866), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (9832, 9866), False, 'import arcade\n'), ((9985, 10032), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (9998, 10032), False, 'import arcade\n'), ((10152, 10199), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (10165, 10199), False, 'import arcade\n'), ((10317, 10364), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (10330, 10364), False, 'import arcade\n'), ((10482, 10529), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (10495, 10529), False, 'import arcade\n'), ((10654, 10682), 'arcade.load_texture', 'arcade.load_texture', (['"""g.png"""'], {}), "('g.png')\n", (10673, 10682), False, 'import arcade\n'), ((10702, 10734), 'arcade.Sprite', 'arcade.Sprite', (['"""small.png"""', '(0.05)'], {}), "('small.png', 0.05)\n", (10715, 10734), False, 'import arcade\n'), ((18542, 18554), 'arcade.run', 'arcade.run', ([], {}), '()\n', (18552, 18554), False, 'import arcade\n'), ((2981, 3018), 'arcade.Sprite', 'arcade.Sprite', (['"""coin.png"""', 'COIN_SCALE'], {}), "('coin.png', COIN_SCALE)\n", (2994, 3018), False, 'import arcade\n'), ((11440, 11459), 'os.chdir', 'os.chdir', (['file_path'], {}), '(file_path)\n', (11448, 11459), False, 'import os\n'), ((11983, 12013), 'arcade.AnimatedWalkingSprite', 'arcade.AnimatedWalkingSprite', ([], {}), '()\n', (12011, 12013), False, 'import arcade\n'), ((12064, 12083), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (12081, 12083), False, 'import arcade\n'), ((12117, 12136), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (12134, 12136), False, 'import arcade\n'), ((12168, 12187), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (12185, 12187), False, 'import arcade\n'), ((14095, 14187), 'arcade.PhysicsEngineSimple', 'arcade.PhysicsEngineSimple', (['self.player_sprite', 'self.rooms[self.current_room].wall_list'], {}), '(self.player_sprite, self.rooms[self.current_room\n ].wall_list)\n', (14121, 14187), False, 'import arcade\n'), ((14214, 14306), 'arcade.PhysicsEngineSimple', 'arcade.PhysicsEngineSimple', (['self.player_sprite', 'self.rooms[self.current_room].door_list'], {}), '(self.player_sprite, self.rooms[self.current_room\n ].door_list)\n', (14240, 14306), False, 'import arcade\n'), ((14469, 14490), 'arcade.start_render', 'arcade.start_render', ([], {}), '()\n', (14488, 14490), False, 'import arcade\n'), ((14541, 14684), 'arcade.draw_texture_rectangle', 'arcade.draw_texture_rectangle', (['(SCREEN_WIDTH // 2)', '(SCREEN_HEIGHT // 2)', 'SCREEN_WIDTH', 'SCREEN_HEIGHT', 'self.rooms[self.current_room].background'], {}), '(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2,\n SCREEN_WIDTH, SCREEN_HEIGHT, self.rooms[self.current_room].background)\n', (14570, 14684), False, 'import arcade\n'), ((15236, 15292), 'arcade.draw_text', 'arcade.draw_text', (['output', '(10)', '(20)', 'arcade.color.WHITE', '(14)'], {}), '(output, 10, 20, arcade.color.WHITE, 14)\n', (15252, 15292), False, 'import arcade\n'), ((17226, 17328), 'arcade.check_for_collision_with_list', 
'arcade.check_for_collision_with_list', (['self.player_sprite', 'self.rooms[self.current_room].coin_list'], {}), '(self.player_sprite, self.rooms[self.\n current_room].coin_list)\n', (17262, 17328), False, 'import arcade\n'), ((17344, 17451), 'arcade.check_for_collision_with_list', 'arcade.check_for_collision_with_list', (['self.player_sprite', 'self.rooms[self.current_room].bigpotion_list'], {}), '(self.player_sprite, self.rooms[self.\n current_room].bigpotion_list)\n', (17380, 17451), False, 'import arcade\n'), ((17467, 17576), 'arcade.check_for_collision_with_list', 'arcade.check_for_collision_with_list', (['self.player_sprite', 'self.rooms[self.current_room].smallpotion_list'], {}), '(self.player_sprite, self.rooms[self.\n current_room].smallpotion_list)\n', (17503, 17576), False, 'import arcade\n'), ((1459, 1507), 'arcade.Sprite', 'arcade.Sprite', (['"""gravel_dirt.png"""', 'SPRITE_SCALING'], {}), "('gravel_dirt.png', SPRITE_SCALING)\n", (1472, 1507), False, 'import arcade\n'), ((4014, 4061), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (4027, 4061), False, 'import arcade\n'), ((11404, 11429), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (11419, 11429), False, 'import os\n'), ((12416, 12478), 'arcade.load_texture', 'arcade.load_texture', (['"""zombie_stand.png"""'], {'scale': 'character_scale'}), "('zombie_stand.png', scale=character_scale)\n", (12435, 12478), False, 'import arcade\n'), ((12657, 12734), 'arcade.load_texture', 'arcade.load_texture', (['"""zombie_stand.png"""'], {'scale': 'character_scale', 'mirrored': '(True)'}), "('zombie_stand.png', scale=character_scale, mirrored=True)\n", (12676, 12734), False, 'import arcade\n'), ((12916, 12978), 'arcade.load_texture', 'arcade.load_texture', (['"""zombie_walk1.png"""'], {'scale': 'character_scale'}), "('zombie_walk1.png', scale=character_scale)\n", (12935, 12978), False, 'import arcade\n'), ((13103, 13165), 'arcade.load_texture', 'arcade.load_texture', (['"""zombie_walk2.png"""'], {'scale': 'character_scale'}), "('zombie_walk2.png', scale=character_scale)\n", (13122, 13165), False, 'import arcade\n'), ((13345, 13422), 'arcade.load_texture', 'arcade.load_texture', (['"""zombie_walk1.png"""'], {'scale': 'character_scale', 'mirrored': '(True)'}), "('zombie_walk1.png', scale=character_scale, mirrored=True)\n", (13364, 13422), False, 'import arcade\n'), ((13545, 13622), 'arcade.load_texture', 'arcade.load_texture', (['"""zombie_walk2.png"""'], {'scale': 'character_scale', 'mirrored': '(True)'}), "('zombie_walk2.png', scale=character_scale, mirrored=True)\n", (13564, 13622), False, 'import arcade\n'), ((16661, 16753), 'arcade.PhysicsEngineSimple', 'arcade.PhysicsEngineSimple', (['self.player_sprite', 'self.rooms[self.current_room].wall_list'], {}), '(self.player_sprite, self.rooms[self.current_room\n ].wall_list)\n', (16687, 16753), False, 'import arcade\n'), ((17690, 17724), 'arcade.load_sound', 'arcade.load_sound', (['"""coinsound.wav"""'], {}), "('coinsound.wav')\n", (17707, 17724), False, 'import arcade\n'), ((17738, 17765), 'arcade.play_sound', 'arcade.play_sound', (['my_sound'], {}), '(my_sound)\n', (17755, 17765), False, 'import arcade\n'), ((18130, 18161), 'arcade.load_sound', 'arcade.load_sound', (['"""shrink.wav"""'], {}), "('shrink.wav')\n", (18147, 18161), False, 'import arcade\n'), ((18175, 18202), 'arcade.play_sound', 'arcade.play_sound', (['tu_sound'], {}), '(tu_sound)\n', (18192, 18202), False, 'import arcade\n'), 
((18344, 18373), 'arcade.load_sound', 'arcade.load_sound', (['"""grow.wav"""'], {}), "('grow.wav')\n", (18361, 18373), False, 'import arcade\n'), ((18387, 18414), 'arcade.play_sound', 'arcade.play_sound', (['yo_sound'], {}), '(yo_sound)\n', (18404, 18414), False, 'import arcade\n'), ((1991, 2039), 'arcade.Sprite', 'arcade.Sprite', (['"""gravel_dirt.png"""', 'SPRITE_SCALING'], {}), "('gravel_dirt.png', SPRITE_SCALING)\n", (2004, 2039), False, 'import arcade\n'), ((2433, 2475), 'arcade.Sprite', 'arcade.Sprite', (['"""fence.png"""', 'SPRITE_SCALING'], {}), "('fence.png', SPRITE_SCALING)\n", (2446, 2475), False, 'import arcade\n'), ((4527, 4574), 'arcade.Sprite', 'arcade.Sprite', (['"""stone_snow.png"""', 'SPRITE_SCALING'], {}), "('stone_snow.png', SPRITE_SCALING)\n", (4540, 4574), False, 'import arcade\n'), ((17000, 17092), 'arcade.PhysicsEngineSimple', 'arcade.PhysicsEngineSimple', (['self.player_sprite', 'self.rooms[self.current_room].wall_list'], {}), '(self.player_sprite, self.rooms[self.current_room\n ].wall_list)\n', (17026, 17092), False, 'import arcade\n'), ((17914, 17943), 'arcade.load_sound', 'arcade.load_sound', (['"""door.wav"""'], {}), "('door.wav')\n", (17931, 17943), False, 'import arcade\n'), ((17961, 17990), 'arcade.play_sound', 'arcade.play_sound', (['your_sound'], {}), '(your_sound)\n', (17978, 17990), False, 'import arcade\n')] |
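The update() method in the arcade file above switches rooms whenever the player walks past a screen edge and wraps the x coordinate to the opposite side. A minimal, dependency-free sketch of just that rule (the 800-pixel width and the function name are assumptions; SCREEN_WIDTH is defined outside the excerpt):

SCREEN_WIDTH = 800  # assumed value; the real constant lives elsewhere in the game file

def next_room(current_room, center_x, n_rooms=2):
    # Walked off the right edge: advance one room and re-enter from the left.
    if center_x > SCREEN_WIDTH and current_room < n_rooms - 1:
        return current_room + 1, 0
    # Walked off the left edge: go back one room and re-enter from the right.
    if center_x < 0 and current_room > 0:
        return current_room - 1, SCREEN_WIDTH
    return current_room, center_x

print(next_room(0, 805))  # (1, 0)
print(next_room(1, -3))   # (0, 800)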
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import os
import unittest
import re
from . import sysinfo
# Linux/OS X/BSD platforms can implement this by calling out to lsof
if sysinfo.WIN:
def _run_lsof():
raise unittest.SkipTest("lsof not expected on Windows")
else:
def _run_lsof():
import tempfile
pid = os.getpid()
fd, tmpname = tempfile.mkstemp('get_open_files')
os.close(fd)
lsof_command = 'lsof -p %s > %s' % (pid, tmpname)
if os.system(lsof_command):
# XXX: This prints to the console an annoying message: 'lsof is not recognized'
raise unittest.SkipTest("lsof failed")
with open(tmpname) as fobj:
data = fobj.read().strip()
os.remove(tmpname)
return data
def default_get_open_files(pipes=False):
data = _run_lsof()
results = {}
for line in data.split('\n'):
line = line.strip()
if not line or line.startswith("COMMAND"):
# Skip header and blank lines
continue
split = re.split(r'\s+', line)
_command, _pid, _user, fd = split[:4]
# Pipes (on OS X, at least) get an fd like "3" while normal files get an fd like "1u"
if fd[:-1].isdigit() or fd.isdigit():
if not pipes and fd[-1].isdigit():
continue
fd = int(fd[:-1]) if not fd[-1].isdigit() else int(fd)
if fd in results:
params = (fd, line, split, results.get(fd), data)
raise AssertionError('error when parsing lsof output: duplicate fd=%r\nline=%r\nsplit=%r\nprevious=%r\ndata:\n%s' % params)
results[fd] = line
if not results:
raise AssertionError('failed to parse lsof:\n%s' % (data, ))
results['data'] = data
return results
def default_get_number_open_files():
if os.path.exists('/proc/'):
# Linux only
fd_directory = '/proc/%d/fd' % os.getpid()
return len(os.listdir(fd_directory))
try:
return len(get_open_files(pipes=True)) - 1
except (OSError, AssertionError, unittest.SkipTest):
return 0
lsof_get_open_files = default_get_open_files
try:
    # psutil imports subprocess, which on Python 3 imports selectors.
# This can expose issues with monkey-patching.
import psutil
except ImportError:
get_open_files = default_get_open_files
get_number_open_files = default_get_number_open_files
else:
# If psutil is available (it is cross-platform) use that.
# It is *much* faster than shelling out to lsof each time
# (Running 14 tests takes 3.964s with lsof and 0.046 with psutil)
# However, it still doesn't completely solve the issue on Windows: fds are reported
# as -1 there, so we can't fully check those.
def get_open_files():
"""
Return a list of popenfile and pconn objects.
Note that other than `fd`, they have different attributes.
.. important:: If you want to find open sockets, on Windows
and linux, it is important that the socket at least be listening
(socket.listen(1)). Unlike the lsof implementation, this will only
return sockets in a state like that.
"""
results = dict()
process = psutil.Process()
results['data'] = process.open_files() + process.connections('all')
for x in results['data']:
results[x.fd] = x
results['data'] += ['From psutil', process]
return results
def get_number_open_files():
process = psutil.Process()
try:
return process.num_fds()
except AttributeError:
# num_fds is unix only. Is num_handles close enough on Windows?
return 0
| [
"os.path.exists",
"re.split",
"os.listdir",
"os.close",
"psutil.Process",
"unittest.SkipTest",
"os.getpid",
"os.system",
"tempfile.mkstemp",
"os.remove"
]
| [((2976, 3000), 'os.path.exists', 'os.path.exists', (['"""/proc/"""'], {}), "('/proc/')\n", (2990, 3000), False, 'import os\n'), ((1343, 1392), 'unittest.SkipTest', 'unittest.SkipTest', (['"""lsof not expected on Windows"""'], {}), "('lsof not expected on Windows')\n", (1360, 1392), False, 'import unittest\n'), ((1458, 1469), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1467, 1469), False, 'import os\n'), ((1492, 1526), 'tempfile.mkstemp', 'tempfile.mkstemp', (['"""get_open_files"""'], {}), "('get_open_files')\n", (1508, 1526), False, 'import tempfile\n'), ((1535, 1547), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (1543, 1547), False, 'import os\n'), ((1617, 1640), 'os.system', 'os.system', (['lsof_command'], {}), '(lsof_command)\n', (1626, 1640), False, 'import os\n'), ((1868, 1886), 'os.remove', 'os.remove', (['tmpname'], {}), '(tmpname)\n', (1877, 1886), False, 'import os\n'), ((2181, 2203), 're.split', 're.split', (['"""\\\\s+"""', 'line'], {}), "('\\\\s+', line)\n", (2189, 2203), False, 'import re\n'), ((4390, 4406), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (4404, 4406), False, 'import psutil\n'), ((4674, 4690), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (4688, 4690), False, 'import psutil\n'), ((1752, 1784), 'unittest.SkipTest', 'unittest.SkipTest', (['"""lsof failed"""'], {}), "('lsof failed')\n", (1769, 1784), False, 'import unittest\n'), ((3062, 3073), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3071, 3073), False, 'import os\n'), ((3093, 3117), 'os.listdir', 'os.listdir', (['fd_directory'], {}), '(fd_directory)\n', (3103, 3117), False, 'import os\n')] |
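The helpers above pick between lsof, /proc and psutil to count a process's open file descriptors. A small self-contained sketch of the same idea for quick experiments (the function name is made up; it is not part of gevent):

import os

def count_open_fds():
    try:
        import psutil
        return psutil.Process().num_fds()            # Unix-only, as the code above notes
    except (ImportError, AttributeError):
        fd_dir = '/proc/%d/fd' % os.getpid()         # Linux fallback, mirroring default_get_number_open_files
        return len(os.listdir(fd_dir)) if os.path.isdir(fd_dir) else 0

if __name__ == '__main__':
    before = count_open_fds()
    handle = open(__file__)
    print(before, count_open_fds())                  # the second count is typically one higher
    handle.close()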
from django.contrib.messages.constants import DEFAULT_LEVELS
from user_messages.api import get_messages
def messages(request):
"""
Return a lazy 'messages' context variable as well as
'DEFAULT_MESSAGE_LEVELS'.
"""
return {
"messages": get_messages(request=request),
"DEFAULT_MESSAGE_LEVELS": DEFAULT_LEVELS,
}
| [
"user_messages.api.get_messages"
]
| [((266, 295), 'user_messages.api.get_messages', 'get_messages', ([], {'request': 'request'}), '(request=request)\n', (278, 295), False, 'from user_messages.api import get_messages\n')] |
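For context, a context processor like messages() above only takes effect once it is listed in the template settings. A hedged sketch of the relevant settings fragment (the dotted path "user_messages.context_processors.messages" is an assumption about where the function is exposed):

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "user_messages.context_processors.messages",  # assumed dotted path to the function above
            ],
        },
    },
]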
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..events import events_plot
from ..stats import standardize as nk_standardize
def signal_plot(
signal, sampling_rate=None, subplots=False, standardize=False, labels=None, **kwargs
):
"""Plot signal with events as vertical lines.
Parameters
----------
signal : array or DataFrame
Signal array (can be a dataframe with many signals).
sampling_rate : int
The sampling frequency of the signal (in Hz, i.e., samples/second). Needs to be supplied if
the data should be plotted over time in seconds. Otherwise the data is plotted over samples.
Defaults to None.
subplots : bool
If True, each signal is plotted in a subplot.
standardize : bool
If True, all signals will have the same scale (useful for visualisation).
    labels : str or list
        Labels for each plotted signal, used for the legend. Defaults to None (the column names are used).
**kwargs : optional
Arguments passed to matplotlib plotting.
Examples
----------
>>> import numpy as np
>>> import pandas as pd
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=10, sampling_rate=1000)
>>> nk.signal_plot(signal, sampling_rate=1000, color="red")
>>>
>>> data = pd.DataFrame({"Signal2": np.cos(np.linspace(start=0, stop=20, num=1000)),
... "Signal3": np.sin(np.linspace(start=0, stop=20, num=1000)),
... "Signal4": nk.signal_binarize(np.cos(np.linspace(start=0, stop=40, num=1000)))})
>>> nk.signal_plot(data, labels=['signal_1', 'signal_2', 'signal_3'], subplots=True)
>>> nk.signal_plot([signal, data], standardize=True)
"""
# Sanitize format
if isinstance(signal, list):
try:
for i in signal:
len(i)
except TypeError:
signal = np.array(signal)
if isinstance(signal, pd.DataFrame) is False:
# If list is passed
if isinstance(signal, list) or len(np.array(signal).shape) > 1:
out = pd.DataFrame()
for i, content in enumerate(signal):
if isinstance(content, (pd.DataFrame, pd.Series)):
out = pd.concat([out, content], axis=1, sort=True)
else:
out = pd.concat(
[out, pd.DataFrame({"Signal" + str(i + 1): content})],
axis=1,
sort=True,
)
signal = out
# If vector is passed
else:
signal = pd.DataFrame({"Signal": signal})
# Copy signal
signal = signal.copy()
# Guess continuous and events columns
continuous_columns = list(signal.columns.values)
events_columns = []
for col in signal.columns:
vector = signal[col]
if vector.nunique() == 2:
indices = np.where(vector == np.max(vector.unique()))
if bool(np.any(np.diff(indices) == 1)) is False:
events_columns.append(col)
continuous_columns.remove(col)
# Adjust for sampling rate
if sampling_rate is not None:
signal.index = signal.index / sampling_rate
title_x = "Time (seconds)"
else:
title_x = "Time"
# x_axis = np.linspace(0, signal.shape[0] / sampling_rate, signal.shape[0])
# x_axis = pd.DataFrame(x_axis, columns=["Time (s)"])
# signal = pd.concat([signal, x_axis], axis=1)
# signal = signal.set_index("Time (s)")
# Plot accordingly
if len(events_columns) > 0:
events = []
for col in events_columns:
vector = signal[col]
events.append(np.where(vector == np.max(vector.unique()))[0])
plot = events_plot(events, signal=signal[continuous_columns])
if sampling_rate is None and signal.index.is_integer():
plot.gca().set_xlabel("Samples")
else:
plot.gca().set_xlabel(title_x)
else:
# Aesthetics
colors = [
"#1f77b4",
"#ff7f0e",
"#2ca02c",
"#d62728",
"#9467bd",
"#8c564b",
"#e377c2",
"#7f7f7f",
"#bcbd22",
"#17becf",
]
if len(continuous_columns) > len(colors):
colors = plt.cm.viridis(np.linspace(0, 1, len(continuous_columns)))
# Plot
if standardize is True:
signal[continuous_columns] = nk_standardize(signal[continuous_columns])
if subplots is True:
_, axes = plt.subplots(nrows=len(continuous_columns), ncols=1, sharex=True, **kwargs)
for ax, col, color in zip(axes, continuous_columns, colors):
ax.plot(signal[col], c=color, **kwargs)
else:
plot = signal[continuous_columns].plot(subplots=False, sharex=True, **kwargs)
if sampling_rate is None and signal.index.is_integer():
plt.xlabel("Samples")
else:
plt.xlabel(title_x)
# Tidy legend locations and add labels
if labels is None:
labels = continuous_columns.copy()
if isinstance(labels, str):
n_labels = len([labels])
labels = [labels]
elif isinstance(labels, list):
n_labels = len(labels)
if len(signal[continuous_columns].columns) != n_labels:
raise ValueError(
"NeuroKit error: signal_plot(): number of labels does not equal the number of plotted signals."
)
if subplots is False:
plt.legend(labels, loc=1)
else:
for i, label in enumerate(labels):
axes[i].legend([label], loc=1)
| [
"matplotlib.pyplot.xlabel",
"numpy.diff",
"numpy.array",
"pandas.DataFrame",
"pandas.concat",
"matplotlib.pyplot.legend"
]
| [((5581, 5606), 'matplotlib.pyplot.legend', 'plt.legend', (['labels'], {'loc': '(1)'}), '(labels, loc=1)\n', (5591, 5606), True, 'import matplotlib.pyplot as plt\n'), ((2079, 2093), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2091, 2093), True, 'import pandas as pd\n'), ((2599, 2631), 'pandas.DataFrame', 'pd.DataFrame', (["{'Signal': signal}"], {}), "({'Signal': signal})\n", (2611, 2631), True, 'import pandas as pd\n'), ((5005, 5026), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Samples"""'], {}), "('Samples')\n", (5015, 5026), True, 'import matplotlib.pyplot as plt\n'), ((5053, 5072), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['title_x'], {}), '(title_x)\n', (5063, 5072), True, 'import matplotlib.pyplot as plt\n'), ((1892, 1908), 'numpy.array', 'np.array', (['signal'], {}), '(signal)\n', (1900, 1908), True, 'import numpy as np\n'), ((2236, 2280), 'pandas.concat', 'pd.concat', (['[out, content]'], {'axis': '(1)', 'sort': '(True)'}), '([out, content], axis=1, sort=True)\n', (2245, 2280), True, 'import pandas as pd\n'), ((2032, 2048), 'numpy.array', 'np.array', (['signal'], {}), '(signal)\n', (2040, 2048), True, 'import numpy as np\n'), ((2985, 3001), 'numpy.diff', 'np.diff', (['indices'], {}), '(indices)\n', (2992, 3001), True, 'import numpy as np\n')] |
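signal_plot() above treats a column as an event channel, rather than a continuous signal, when it holds exactly two distinct values and the occurrences of the larger value are never adjacent samples. A stand-alone sketch of that heuristic (the helper name is invented):

import numpy as np

def is_events_column(vector):
    vector = np.asarray(vector)
    if len(np.unique(vector)) != 2:
        return False
    indices = np.where(vector == vector.max())[0]
    return not np.any(np.diff(indices) == 1)

print(is_events_column([0, 0, 1, 0, 0, 1, 0]))  # True: isolated event markers
print(is_events_column([0, 1, 1, 0, 0, 0, 0]))  # False: consecutive "on" samples look continuous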
"""Only one validation per mission, user and actor
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2021-10-14 11:22:01.124488
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade():
op.execute(
"""
WITH validation_duplicates AS (
SELECT
id,
ROW_NUMBER() OVER (PARTITION BY user_id, mission_id, submitter_id ORDER BY reception_time DESC) AS rn
FROM mission_validation
)
DELETE FROM mission_validation mv
USING validation_duplicates vd
WHERE mv.id = vd.id AND vd.rn >= 2
"""
)
op.execute(
"""
ALTER TABLE mission_validation ADD CONSTRAINT only_one_validation_per_submitter_mission_and_user
EXCLUDE USING GIST (
mission_id WITH =,
submitter_id WITH =,
user_id WITH =
)
"""
)
def downgrade():
op.drop_constraint(
"only_one_validation_per_submitter_mission_and_user",
"mission_validation",
)
| [
"alembic.op.drop_constraint",
"alembic.op.execute"
]
| [((324, 728), 'alembic.op.execute', 'op.execute', (['"""\n WITH validation_duplicates AS (\n SELECT\n id,\n ROW_NUMBER() OVER (PARTITION BY user_id, mission_id, submitter_id ORDER BY reception_time DESC) AS rn\n FROM mission_validation\n )\n DELETE FROM mission_validation mv\n USING validation_duplicates vd\n WHERE mv.id = vd.id AND vd.rn >= 2\n """'], {}), '(\n """\n WITH validation_duplicates AS (\n SELECT\n id,\n ROW_NUMBER() OVER (PARTITION BY user_id, mission_id, submitter_id ORDER BY reception_time DESC) AS rn\n FROM mission_validation\n )\n DELETE FROM mission_validation mv\n USING validation_duplicates vd\n WHERE mv.id = vd.id AND vd.rn >= 2\n """\n )\n', (334, 728), False, 'from alembic import op\n'), ((737, 1009), 'alembic.op.execute', 'op.execute', (['"""\n ALTER TABLE mission_validation ADD CONSTRAINT only_one_validation_per_submitter_mission_and_user\n EXCLUDE USING GIST (\n mission_id WITH =,\n submitter_id WITH =,\n user_id WITH =\n )\n """'], {}), '(\n """\n ALTER TABLE mission_validation ADD CONSTRAINT only_one_validation_per_submitter_mission_and_user\n EXCLUDE USING GIST (\n mission_id WITH =,\n submitter_id WITH =,\n user_id WITH =\n )\n """\n )\n', (747, 1009), False, 'from alembic import op\n'), ((1037, 1135), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""only_one_validation_per_submitter_mission_and_user"""', '"""mission_validation"""'], {}), "('only_one_validation_per_submitter_mission_and_user',\n 'mission_validation')\n", (1055, 1135), False, 'from alembic import op\n')] |
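An aside on the constraint above: because every operator is plain equality, the rule amounts to a uniqueness requirement over (mission_id, submitter_id, user_id). For non-null columns the same effect could also be written with Alembic's ordinary unique-constraint helper, sketched below using the op object already imported above:

op.create_unique_constraint(
    "only_one_validation_per_submitter_mission_and_user",
    "mission_validation",
    ["mission_id", "submitter_id", "user_id"],
)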
# Copyright 2020 <NAME> (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/python
import os
import sys
import argparse
from rtdb2 import RtDB2Store, RTDB2_DEFAULT_PATH
import rtdb2tools
from hexdump import hexdump
# Main structure of the program
if __name__ == "__main__":
# Argument parsing.
descriptionTxt = 'This tool reads a value from the database given an RtDB key.\n'
exampleTxt = """Example: rtdb2_get.py -a 6 ROBOT_STATE
age: 2h
shared: True
list: False
value: [2, [1581172987, 618438], [0.05368572473526001, -0.2938263416290283, 5.330356597900391], [0.1385340541601181, -0.8020891547203064, 0.7817431688308716], False, [0.0, 0.0], 6, 'A']
Example: rtdb2_get.py -a 2 DIAG_WORLDMODEL_LOCAL -x "['balls'][0]['result']"
[[5.3209381103515625, 0.5837346315383911, 0.15281200408935547], [-0.0029433025047183037, 0.01433953270316124, 1.2758345292240847e-05], 1.0, [22033, 1889585904]]
"""
parser = argparse.ArgumentParser(description=descriptionTxt, epilog=exampleTxt, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-a', '--agent', help='agent ID to use', type=int, default=rtdb2tools.guessAgentId())
parser.add_argument('-s', '--serialized', help='also show serialized string (as hexdump)', action='store_true')
parser.add_argument('-p', '--path', help='database path to use', type=str, default=RTDB2_DEFAULT_PATH)
parser.add_argument('-x', '--expression', help='evaluate expression, useful to fetch a specific element', type=str)
parser.add_argument('key', help='RtDB key to read')
args = parser.parse_args()
# Create instance of RtDB2Store and read databases from disk
rtdb2Store = RtDB2Store(args.path)
item = rtdb2Store.get(args.agent, args.key, timeout=None)
if args.expression:
print(eval("item.value" + args.expression))
else:
print(str(item))
if args.serialized:
hexdump(item.value_serialized)
rtdb2Store.closeAll()
| [
"rtdb2tools.guessAgentId",
"rtdb2.RtDB2Store",
"argparse.ArgumentParser",
"hexdump.hexdump"
]
| [((938, 1066), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'descriptionTxt', 'epilog': 'exampleTxt', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=descriptionTxt, epilog=exampleTxt,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n', (961, 1066), False, 'import argparse\n'), ((1687, 1708), 'rtdb2.RtDB2Store', 'RtDB2Store', (['args.path'], {}), '(args.path)\n', (1697, 1708), False, 'from rtdb2 import RtDB2Store, RTDB2_DEFAULT_PATH\n'), ((1915, 1945), 'hexdump.hexdump', 'hexdump', (['item.value_serialized'], {}), '(item.value_serialized)\n', (1922, 1945), False, 'from hexdump import hexdump\n'), ((1147, 1172), 'rtdb2tools.guessAgentId', 'rtdb2tools.guessAgentId', ([], {}), '()\n', (1170, 1172), False, 'import rtdb2tools\n')] |
from __future__ import print_function
from __future__ import division
import os
import gym
import numpy as np
from skimage.transform import resize
from skimage.color import rgb2gray
class Atari(object):
s_dim = [84, 84, 1]
a_dim = 3
def __init__(self, args, record_video=False):
self.env = gym.make('BreakoutNoFrameskip-v4')
self.ale = self.env.env.ale # ale interface
if record_video:
video_dir = os.path.join(args.save_path, 'videos')
if not os.path.exists(video_dir):
os.makedirs(video_dir)
self.env = gym.wrappers.Monitor(
self.env, video_dir, video_callable=lambda x: True, resume=True)
self.ale = self.env.env.env.ale
self.screen_size = Atari.s_dim[:2] # 84x84
self.noop_max = 30
self.frame_skip = 4
self.frame_feq = 4
self.s_dim = Atari.s_dim
self.a_dim = Atari.a_dim
self.action_space = [1, 2, 3] # Breakout specify
self.done = True
def new_round(self):
if not self.done: # dead but not done
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
obs = self.preprocess(obs)
else: # terminal
self.env.reset()
# No-op
for _ in range(np.random.randint(1, self.noop_max + 1)):
obs, _, done, _ = self.env.step(0)
obs = self.preprocess(obs)
return obs
def preprocess(self, observ):
return resize(rgb2gray(observ), self.screen_size)
def step(self, action):
observ, reward, dead = None, 0, False
for _ in range(self.frame_skip):
lives_before = self.ale.lives()
o, r, self.done, _ = self.env.step(self.action_space[action])
lives_after = self.ale.lives()
reward += r
if lives_before > lives_after:
dead = True
break
observ = self.preprocess(o)
observ = np.reshape(observ, newshape=self.screen_size + [1])
self.state = np.append(self.state[:, :, 1:], observ, axis=2)
return self.state, reward, dead, self.done
| [
"os.path.exists",
"skimage.color.rgb2gray",
"numpy.reshape",
"os.makedirs",
"os.path.join",
"numpy.append",
"numpy.random.randint",
"gym.wrappers.Monitor",
"gym.make"
]
| [((315, 349), 'gym.make', 'gym.make', (['"""BreakoutNoFrameskip-v4"""'], {}), "('BreakoutNoFrameskip-v4')\n", (323, 349), False, 'import gym\n'), ((2046, 2097), 'numpy.reshape', 'np.reshape', (['observ'], {'newshape': '(self.screen_size + [1])'}), '(observ, newshape=self.screen_size + [1])\n', (2056, 2097), True, 'import numpy as np\n'), ((2119, 2166), 'numpy.append', 'np.append', (['self.state[:, :, 1:]', 'observ'], {'axis': '(2)'}), '(self.state[:, :, 1:], observ, axis=2)\n', (2128, 2166), True, 'import numpy as np\n'), ((452, 490), 'os.path.join', 'os.path.join', (['args.save_path', '"""videos"""'], {}), "(args.save_path, 'videos')\n", (464, 490), False, 'import os\n'), ((599, 688), 'gym.wrappers.Monitor', 'gym.wrappers.Monitor', (['self.env', 'video_dir'], {'video_callable': '(lambda x: True)', 'resume': '(True)'}), '(self.env, video_dir, video_callable=lambda x: True,\n resume=True)\n', (619, 688), False, 'import gym\n'), ((1563, 1579), 'skimage.color.rgb2gray', 'rgb2gray', (['observ'], {}), '(observ)\n', (1571, 1579), False, 'from skimage.color import rgb2gray\n'), ((510, 535), 'os.path.exists', 'os.path.exists', (['video_dir'], {}), '(video_dir)\n', (524, 535), False, 'import os\n'), ((553, 575), 'os.makedirs', 'os.makedirs', (['video_dir'], {}), '(video_dir)\n', (564, 575), False, 'import os\n'), ((1355, 1394), 'numpy.random.randint', 'np.random.randint', (['(1)', '(self.noop_max + 1)'], {}), '(1, self.noop_max + 1)\n', (1372, 1394), True, 'import numpy as np\n')] |
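The Atari wrapper above reduces every RGB frame to an 84x84 grayscale image before stacking. The same preprocessing can be tried in isolation on a dummy frame (the 210x160x3 shape is the standard Atari frame size; nothing here touches the gym environment):

import numpy as np
from skimage.color import rgb2gray
from skimage.transform import resize

frame = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)  # stand-in for an emulator frame
gray = rgb2gray(frame)                  # (210, 160), floats in [0, 1]
small = resize(gray, (84, 84))           # (84, 84), matching Atari.s_dim[:2]
print(small.shape, float(small.min()), float(small.max()))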
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import time
import mxnet as mx
import numpy as np
from get_data import get_movielens_iter, get_movielens_data
from model import matrix_fact_model_parallel_net
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Run model parallel version of matrix factorization",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-epoch', type=int, default=3,
help='number of epochs to train')
parser.add_argument('--batch-size', type=int, default=256,
help='number of examples per batch')
parser.add_argument('--print-every', type=int, default=100,
help='logging interval')
parser.add_argument('--factor-size', type=int, default=128,
help="the factor size of the embedding operation")
parser.add_argument('--num-gpus', type=int, default=2,
help="number of gpus to use")
MOVIELENS = {
'dataset': 'ml-10m',
'train': './ml-10M100K/r1.train',
'val': './ml-10M100K/r1.test',
'max_user': 71569,
'max_movie': 65135,
}
if __name__ == '__main__':
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=head)
# arg parser
args = parser.parse_args()
logging.info(args)
num_epoch = args.num_epoch
batch_size = args.batch_size
optimizer = 'sgd'
factor_size = args.factor_size
print_every = args.print_every
num_gpus = args.num_gpus
momentum = 0.9
learning_rate = 0.1
# prepare dataset and iterators
max_user = MOVIELENS['max_user']
max_movies = MOVIELENS['max_movie']
get_movielens_data(MOVIELENS['dataset'])
train_iter = get_movielens_iter(MOVIELENS['train'], batch_size)
val_iter = get_movielens_iter(MOVIELENS['val'], batch_size)
# construct the model
net = matrix_fact_model_parallel_net(factor_size, factor_size, max_user, max_movies)
# construct the module
# map the ctx_group attribute to the context assignment
    group2ctxs = {'dev1': [mx.cpu()] * num_gpus, 'dev2': [mx.gpu(i) for i in range(num_gpus)]}
# Creating a module by passing group2ctxs attribute which maps
# the ctx_group attribute to the context assignment
mod = mx.module.Module(symbol=net, context=[mx.cpu()]*num_gpus, data_names=['user', 'item'],
label_names=['score'], group2ctxs=group2ctxs)
# the initializer used to initialize the parameters
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
# the parameters for the optimizer constructor
optimizer_params = {
'learning_rate': learning_rate,
'wd': 1e-4,
'momentum': momentum,
'rescale_grad': 1.0/batch_size}
# use MSE as the metric
metric = mx.gluon.metric.create(['MSE'])
speedometer = mx.callback.Speedometer(batch_size, print_every)
# start training
mod.fit(train_iter,
val_iter,
eval_metric = metric,
num_epoch = num_epoch,
optimizer = optimizer,
optimizer_params = optimizer_params,
initializer = initializer,
batch_end_callback = speedometer)
| [
"logging.basicConfig",
"mxnet.callback.Speedometer",
"model.matrix_fact_model_parallel_net",
"argparse.ArgumentParser",
"mxnet.gluon.metric.create",
"mxnet.cpu",
"get_data.get_movielens_data",
"mxnet.init.Xavier",
"mxnet.gpu",
"get_data.get_movielens_iter",
"logging.info"
]
| [((978, 1018), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (997, 1018), False, 'import logging\n'), ((1029, 1184), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run model parallel version of matrix factorization"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Run model parallel version of matrix factorization', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (1052, 1184), False, 'import argparse\n'), ((2009, 2061), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'head'}), '(level=logging.INFO, format=head)\n', (2028, 2061), False, 'import logging\n'), ((2115, 2133), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (2127, 2133), False, 'import logging\n'), ((2486, 2526), 'get_data.get_movielens_data', 'get_movielens_data', (["MOVIELENS['dataset']"], {}), "(MOVIELENS['dataset'])\n", (2504, 2526), False, 'from get_data import get_movielens_iter, get_movielens_data\n'), ((2544, 2594), 'get_data.get_movielens_iter', 'get_movielens_iter', (["MOVIELENS['train']", 'batch_size'], {}), "(MOVIELENS['train'], batch_size)\n", (2562, 2594), False, 'from get_data import get_movielens_iter, get_movielens_data\n'), ((2610, 2658), 'get_data.get_movielens_iter', 'get_movielens_iter', (["MOVIELENS['val']", 'batch_size'], {}), "(MOVIELENS['val'], batch_size)\n", (2628, 2658), False, 'from get_data import get_movielens_iter, get_movielens_data\n'), ((2696, 2774), 'model.matrix_fact_model_parallel_net', 'matrix_fact_model_parallel_net', (['factor_size', 'factor_size', 'max_user', 'max_movies'], {}), '(factor_size, factor_size, max_user, max_movies)\n', (2726, 2774), False, 'from model import matrix_fact_model_parallel_net\n'), ((3306, 3354), 'mxnet.init.Xavier', 'mx.init.Xavier', ([], {'factor_type': '"""in"""', 'magnitude': '(2.34)'}), "(factor_type='in', magnitude=2.34)\n", (3320, 3354), True, 'import mxnet as mx\n'), ((3608, 3639), 'mxnet.gluon.metric.create', 'mx.gluon.metric.create', (["['MSE']"], {}), "(['MSE'])\n", (3630, 3639), True, 'import mxnet as mx\n'), ((3663, 3711), 'mxnet.callback.Speedometer', 'mx.callback.Speedometer', (['batch_size', 'print_every'], {}), '(batch_size, print_every)\n', (3686, 3711), True, 'import mxnet as mx\n'), ((2915, 2924), 'mxnet.gpu', 'mx.gpu', (['i'], {}), '(i)\n', (2921, 2924), True, 'import mxnet as mx\n'), ((2887, 2895), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (2893, 2895), True, 'import mxnet as mx\n'), ((3124, 3132), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (3130, 3132), True, 'import mxnet as mx\n')] |
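The comments above rely on parts of the symbol graph carrying a ctx_group attribute so that group2ctxs can place each part on different devices. A hedged sketch of how such a tagged symbol is typically written (layer sizes and names are illustrative and not taken from model.py):

import mxnet as mx

def tiny_model_parallel_net():
    data = mx.sym.Variable('data')
    with mx.AttrScope(ctx_group='dev1'):      # placed on the contexts listed under 'dev1'
        hidden = mx.sym.FullyConnected(data=data, num_hidden=64)
    with mx.AttrScope(ctx_group='dev2'):      # placed on the contexts listed under 'dev2'
        out = mx.sym.FullyConnected(data=hidden, num_hidden=1)
    return out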
#!/usr/bin/env python3
import shlex
from tkinter import *
from tkinter import messagebox
from psutil import Popen
top = Tk()
top.title("Franka Gripper Control")
top.geometry("300x75")
def open():
node_process = Popen(shlex.split('rosrun franka_interactive_controllers libfranka_gripper_run 1'))
messagebox.showinfo("Open Gripper", "Gripper Opened")
node_process.terminate()
def close():
node_process = Popen(shlex.split('rosrun franka_interactive_controllers libfranka_gripper_run 0'))
messagebox.showinfo("Close Gripper", "Gripper Closed")
node_process.terminate()
B1 = Button(top, text = "Open Gripper", command = open)
B1.place(x = 30,y = 20)
B2 = Button(top, text = "Close Gripper", command = close)
B2.place(x = 160,y = 20)
top.mainloop()
| [
"shlex.split",
"tkinter.messagebox.showinfo"
]
| [((301, 354), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Open Gripper"""', '"""Gripper Opened"""'], {}), "('Open Gripper', 'Gripper Opened')\n", (320, 354), False, 'from tkinter import messagebox\n'), ((499, 553), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Close Gripper"""', '"""Gripper Closed"""'], {}), "('Close Gripper', 'Gripper Closed')\n", (518, 553), False, 'from tkinter import messagebox\n'), ((222, 298), 'shlex.split', 'shlex.split', (['"""rosrun franka_interactive_controllers libfranka_gripper_run 1"""'], {}), "('rosrun franka_interactive_controllers libfranka_gripper_run 1')\n", (233, 298), False, 'import shlex\n'), ((420, 496), 'shlex.split', 'shlex.split', (['"""rosrun franka_interactive_controllers libfranka_gripper_run 0"""'], {}), "('rosrun franka_interactive_controllers libfranka_gripper_run 0')\n", (431, 496), False, 'import shlex\n')] |
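The two callbacks above differ only in the trailing 0/1 of the rosrun command, so they can be folded into one parameterised helper. A purely illustrative sketch (the function and argument names are made up):

import shlex
from tkinter import messagebox
from psutil import Popen

def set_gripper(open_it):
    state = 1 if open_it else 0
    proc = Popen(shlex.split(
        'rosrun franka_interactive_controllers libfranka_gripper_run %d' % state))
    messagebox.showinfo('Gripper', 'Gripper %s' % ('Opened' if open_it else 'Closed'))
    proc.terminate()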
#!/usr/bin/env python
"""This tool builds or repacks the client binaries.
This handles invocations for the build across the supported platforms including
handling Visual Studio, pyinstaller and other packaging mechanisms.
"""
import logging
import os
import platform
import time
# pylint: disable=unused-import
from grr.client import client_plugins
# pylint: enable=unused-import
from grr.lib import build
from grr.lib import builders
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import startup
parser = flags.PARSER
# Guess which arch we should be building based on where we are running.
if platform.architecture()[0] == "32bit":
default_arch = "i386"
else:
default_arch = "amd64"
default_platform = platform.system().lower()
parser.add_argument(
"--platform", choices=["darwin", "linux", "windows"],
default=default_platform,
help="The platform to build or repack for. This will default to "
"the current platform: %s." % platform.system())
parser.add_argument(
"--arch", choices=["amd64", "i386"],
default=default_arch,
help="The architecture to build or repack for.")
# Guess which package format we should be building based on where we are
# running.
if default_platform == "linux":
distro = platform.linux_distribution()[0]
if distro in ["Ubuntu", "debian"]:
default_package = "deb"
elif distro in ["CentOS Linux", "CentOS", "centos", "redhat", "fedora"]:
default_package = "rpm"
else:
default_package = None
elif default_platform == "darwin":
default_package = "dmg"
elif default_platform == "windows":
default_package = "exe"
parser.add_argument(
"--package_format", choices=["deb", "rpm"],
default=default_package,
help="The packaging format to use when building a Linux client.")
# Initialize sub parsers and their arguments.
subparsers = parser.add_subparsers(
title="subcommands", dest="subparser_name", description="valid subcommands")
# Build arguments.
parser_build = subparsers.add_parser(
"build", help="Build a client from source.")
parser_repack = subparsers.add_parser(
"repack", help="Repack a zip file into an installer (Only useful when "
"signing).")
parser_repack.add_argument("--template", default=None,
help="The template zip file to repack.")
parser_repack.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_repack.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_repack.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_repack.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy = subparsers.add_parser(
"deploy", help="Build a deployable self installer from a package.")
parser_deploy.add_argument("--template", default=None,
help="The template zip file to deploy.")
parser_deploy.add_argument("--templatedir", default="",
help="Directory containing template zip files to "
"repack. Incompatible with --template")
parser_deploy.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_deploy.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_deploy.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_buildanddeploy = subparsers.add_parser(
"buildanddeploy",
help="Build and deploy clients for multiple labels and architectures.")
parser_buildanddeploy.add_argument("--template", default=None,
help="The template zip file to repack, if "
"none is specified we will build it.")
args = parser.parse_args()
def GetBuilder(context):
"""Get the appropriate builder based on the selected flags."""
try:
if args.platform == "darwin":
context = ["Platform:Darwin"] + context
builder_obj = builders.DarwinClientBuilder
elif args.platform == "windows":
context = ["Platform:Windows"] + context
builder_obj = builders.WindowsClientBuilder
elif args.platform == "linux":
if args.package_format == "deb":
context = ["Platform:Linux"] + context
builder_obj = builders.LinuxClientBuilder
elif args.package_format == "rpm":
context = ["Platform:Linux", "Target:LinuxRpm"] + context
builder_obj = builders.CentosClientBuilder
else:
parser.error("Couldn't guess packaging format for: %s" %
platform.linux_distribution()[0])
else:
parser.error("Unsupported build platform: %s" % args.platform)
except AttributeError:
raise RuntimeError("Unable to build for platform %s when running "
"on current platform." % args.platform)
return builder_obj(context=context)
def GetDeployer(context):
"""Get the appropriate client deployer based on the selected flags."""
if args.platform == "darwin":
context = ["Platform:Darwin"] + context
deployer_obj = build.DarwinClientDeployer
elif args.platform == "windows":
context = ["Platform:Windows"] + context
deployer_obj = build.WindowsClientDeployer
elif args.platform == "linux":
if args.package_format == "deb":
context = ["Platform:Linux"] + context
deployer_obj = build.LinuxClientDeployer
else:
context = ["Platform:Linux", "Target:LinuxRpm"] + context
deployer_obj = build.CentosClientDeployer
else:
parser.error("Unsupported build platform: %s" % args.platform)
return deployer_obj(context=context)
def TemplateInputFilename(context):
"""Build template file name from config."""
if args.templatedir:
filename = config_lib.CONFIG.Get("PyInstaller.template_filename",
context=context)
return os.path.join(args.templatedir, filename)
return None
def BuildAndDeploy(context):
"""Run build and deploy to create installers."""
# ISO 8601 date
timestamp = time.strftime("%Y-%m-%dT%H:%M:%S%z")
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
# Output directory like: 2015-02-13T21:48:47-0800/linux_amd64_deb/
spec = "_".join((args.platform, args.arch, args.package_format))
output_dir = os.path.join(config_lib.CONFIG.Get(
"ClientBuilder.executables_path", context=context), timestamp, spec)
# If we weren't passed a template, build one
if args.template:
template_path = args.template
else:
template_path = os.path.join(output_dir, config_lib.CONFIG.Get(
"PyInstaller.template_filename", context=context))
builder_obj = GetBuilder(context)
builder_obj.MakeExecutableTemplate(output_file=template_path)
# Get the list of contexts which we should be building.
context_list = config_lib.CONFIG.Get("ClientBuilder.BuildTargets")
logging.info("Building installers for: %s", context_list)
config_orig = config_lib.CONFIG.ExportState()
deployed_list = []
for deploycontext in context_list:
# Add the settings for this context
for newcontext in deploycontext.split(","):
config_lib.CONFIG.AddContext(newcontext)
context.append(newcontext)
try:
# If the ClientBuilder.target_platforms doesn't match our environment,
# skip.
if not config_lib.CONFIG.MatchBuildContext(args.platform, args.arch,
args.package_format):
continue
deployer = GetDeployer(context)
# Make a nicer filename out of the context string.
context_filename = deploycontext.replace(
"AllPlatforms Context,", "").replace(",", "_").replace(" ", "_")
deployed_list.append(context_filename)
output_filename = os.path.join(
output_dir, context_filename,
config_lib.CONFIG.Get("ClientBuilder.output_filename",
context=deployer.context))
logging.info("Deploying %s as %s with labels: %s", deploycontext,
config_lib.CONFIG.Get(
"Client.name", context=deployer.context),
config_lib.CONFIG.Get(
"Client.labels", context=deployer.context))
deployer.MakeDeployableBinary(template_path, output_filename)
finally:
# Remove the custom settings for the next deploy
for newcontext in deploycontext.split(","):
context.remove(newcontext)
config_lib.ImportConfigManger(config_orig)
logging.info("Complete, installers for %s are in %s", deployed_list,
output_dir)
def main(_):
"""Launch the appropriate builder."""
config_lib.CONFIG.AddContext(
"ClientBuilder Context",
"Context applied when we run the client builder script.")
startup.ClientInit()
# Make sure we have all the secondary configs since they may be set under the
# ClientBuilder Context
for secondconfig in config_lib.CONFIG["ConfigIncludes"]:
config_lib.CONFIG.LoadSecondaryConfig(secondconfig)
# Use basic console output logging so we can see what is happening.
logger = logging.getLogger()
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
logger.handlers = [handler]
# The following is used to change the identity of the builder based on the
# target platform.
context = flags.FLAGS.context
if args.arch == "amd64":
context.append("Arch:amd64")
else:
context.append("Arch:i386")
if args.subparser_name == "build":
builder_obj = GetBuilder(context)
builder_obj.MakeExecutableTemplate()
elif args.subparser_name == "repack":
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
if args.debug_build:
context += ["DebugClientBuild Context"]
deployer = GetDeployer(context)
output_filename = os.path.join(
args.outputdir, config_lib.CONFIG.Get(
"ClientBuilder.output_filename", context=deployer.context))
deployer.RepackInstaller(open(args.template, "rb").read(), args.output or
output_filename)
elif args.subparser_name == "deploy":
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
if args.debug_build:
context += ["DebugClientBuild Context"]
deployer = GetDeployer(context)
template_path = (args.template or TemplateInputFilename(deployer.context) or
config_lib.CONFIG.Get("ClientBuilder.template_path",
context=deployer.context))
# If neither output filename or output directory is specified,
# use the default location from the config file.
output = None
if args.output:
output = args.output
elif args.outputdir:
# If output filename isn't specified, write to args.outputdir with a
# .deployed extension so we can distinguish it from repacked binaries.
filename = ".".join(
(config_lib.CONFIG.Get("ClientBuilder.output_filename",
context=deployer.context), "deployed"))
output = os.path.join(args.outputdir, filename)
deployer.MakeDeployableBinary(template_path, output)
elif args.subparser_name == "buildanddeploy":
BuildAndDeploy(context)
if __name__ == "__main__":
flags.StartMain(main)
| [
"logging.getLogger",
"logging.StreamHandler",
"grr.lib.flags.StartMain",
"grr.lib.config_lib.CONFIG.MatchBuildContext",
"grr.lib.config_lib.ImportConfigManger",
"grr.lib.config_lib.CONFIG.ExportState",
"time.strftime",
"os.path.join",
"platform.linux_distribution",
"grr.lib.config_lib.CONFIG.LoadSecondaryConfig",
"platform.architecture",
"platform.system",
"grr.lib.config_lib.CONFIG.Set",
"grr.lib.config_lib.CONFIG.AddContext",
"grr.lib.config_lib.CONFIG.Get",
"grr.lib.startup.ClientInit",
"logging.info"
]
| [((7005, 7041), 'time.strftime', 'time.strftime', (['"""%Y-%m-%dT%H:%M:%S%z"""'], {}), "('%Y-%m-%dT%H:%M:%S%z')\n", (7018, 7041), False, 'import time\n'), ((7800, 7851), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (['"""ClientBuilder.BuildTargets"""'], {}), "('ClientBuilder.BuildTargets')\n", (7821, 7851), False, 'from grr.lib import config_lib\n'), ((7855, 7912), 'logging.info', 'logging.info', (['"""Building installers for: %s"""', 'context_list'], {}), "('Building installers for: %s', context_list)\n", (7867, 7912), False, 'import logging\n'), ((7929, 7960), 'grr.lib.config_lib.CONFIG.ExportState', 'config_lib.CONFIG.ExportState', ([], {}), '()\n', (7958, 7960), False, 'from grr.lib import config_lib\n'), ((9482, 9567), 'logging.info', 'logging.info', (['"""Complete, installers for %s are in %s"""', 'deployed_list', 'output_dir'], {}), "('Complete, installers for %s are in %s', deployed_list, output_dir\n )\n", (9494, 9567), False, 'import logging\n'), ((9635, 9750), 'grr.lib.config_lib.CONFIG.AddContext', 'config_lib.CONFIG.AddContext', (['"""ClientBuilder Context"""', '"""Context applied when we run the client builder script."""'], {}), "('ClientBuilder Context',\n 'Context applied when we run the client builder script.')\n", (9663, 9750), False, 'from grr.lib import config_lib\n'), ((9763, 9783), 'grr.lib.startup.ClientInit', 'startup.ClientInit', ([], {}), '()\n', (9781, 9783), False, 'from grr.lib import startup\n'), ((10088, 10107), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (10105, 10107), False, 'import logging\n'), ((10120, 10143), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (10141, 10143), False, 'import logging\n'), ((12271, 12292), 'grr.lib.flags.StartMain', 'flags.StartMain', (['main'], {}), '(main)\n', (12286, 12292), False, 'from grr.lib import flags\n'), ((624, 647), 'platform.architecture', 'platform.architecture', ([], {}), '()\n', (645, 647), False, 'import platform\n'), ((738, 755), 'platform.system', 'platform.system', ([], {}), '()\n', (753, 755), False, 'import platform\n'), ((1266, 1295), 'platform.linux_distribution', 'platform.linux_distribution', ([], {}), '()\n', (1293, 1295), False, 'import platform\n'), ((6716, 6787), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (['"""PyInstaller.template_filename"""'], {'context': 'context'}), "('PyInstaller.template_filename', context=context)\n", (6737, 6787), False, 'from grr.lib import config_lib\n'), ((6836, 6876), 'os.path.join', 'os.path.join', (['args.templatedir', 'filename'], {}), '(args.templatedir, filename)\n', (6848, 6876), False, 'import os\n'), ((7066, 7119), 'grr.lib.config_lib.CONFIG.Set', 'config_lib.CONFIG.Set', (['"""Client.plugins"""', 'args.plugins'], {}), "('Client.plugins', args.plugins)\n", (7087, 7119), False, 'from grr.lib import config_lib\n'), ((7285, 7357), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (['"""ClientBuilder.executables_path"""'], {'context': 'context'}), "('ClientBuilder.executables_path', context=context)\n", (7306, 7357), False, 'from grr.lib import config_lib\n'), ((9954, 10005), 'grr.lib.config_lib.CONFIG.LoadSecondaryConfig', 'config_lib.CONFIG.LoadSecondaryConfig', (['secondconfig'], {}), '(secondconfig)\n', (9991, 10005), False, 'from grr.lib import config_lib\n'), ((977, 994), 'platform.system', 'platform.system', ([], {}), '()\n', (992, 994), False, 'import platform\n'), ((7538, 7609), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (['"""PyInstaller.template_filename"""'], 
{'context': 'context'}), "('PyInstaller.template_filename', context=context)\n", (7559, 7609), False, 'from grr.lib import config_lib\n'), ((8114, 8154), 'grr.lib.config_lib.CONFIG.AddContext', 'config_lib.CONFIG.AddContext', (['newcontext'], {}), '(newcontext)\n', (8142, 8154), False, 'from grr.lib import config_lib\n'), ((9436, 9478), 'grr.lib.config_lib.ImportConfigManger', 'config_lib.ImportConfigManger', (['config_orig'], {}), '(config_orig)\n', (9465, 9478), False, 'from grr.lib import config_lib\n'), ((8302, 8389), 'grr.lib.config_lib.CONFIG.MatchBuildContext', 'config_lib.CONFIG.MatchBuildContext', (['args.platform', 'args.arch', 'args.package_format'], {}), '(args.platform, args.arch, args.\n package_format)\n', (8337, 8389), False, 'from grr.lib import config_lib\n'), ((8805, 8890), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (['"""ClientBuilder.output_filename"""'], {'context': 'deployer.context'}), "('ClientBuilder.output_filename', context=deployer.context\n )\n", (8826, 8890), False, 'from grr.lib import config_lib\n'), ((9011, 9073), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (['"""Client.name"""'], {'context': 'deployer.context'}), "('Client.name', context=deployer.context)\n", (9032, 9073), False, 'from grr.lib import config_lib\n'), ((9118, 9182), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (['"""Client.labels"""'], {'context': 'deployer.context'}), "('Client.labels', context=deployer.context)\n", (9139, 9182), False, 'from grr.lib import config_lib\n'), ((10623, 10676), 'grr.lib.config_lib.CONFIG.Set', 'config_lib.CONFIG.Set', (['"""Client.plugins"""', 'args.plugins'], {}), "('Client.plugins', args.plugins)\n", (10644, 10676), False, 'from grr.lib import config_lib\n'), ((10846, 10931), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (['"""ClientBuilder.output_filename"""'], {'context': 'deployer.context'}), "('ClientBuilder.output_filename', context=deployer.context\n )\n", (10867, 10931), False, 'from grr.lib import config_lib\n'), ((11134, 11187), 'grr.lib.config_lib.CONFIG.Set', 'config_lib.CONFIG.Set', (['"""Client.plugins"""', 'args.plugins'], {}), "('Client.plugins', args.plugins)\n", (11155, 11187), False, 'from grr.lib import config_lib\n'), ((11399, 11477), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (['"""ClientBuilder.template_path"""'], {'context': 'deployer.context'}), "('ClientBuilder.template_path', context=deployer.context)\n", (11420, 11477), False, 'from grr.lib import config_lib\n'), ((12066, 12104), 'os.path.join', 'os.path.join', (['args.outputdir', 'filename'], {}), '(args.outputdir, filename)\n', (12078, 12104), False, 'import os\n'), ((11923, 12008), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (['"""ClientBuilder.output_filename"""'], {'context': 'deployer.context'}), "('ClientBuilder.output_filename', context=deployer.context\n )\n", (11944, 12008), False, 'from grr.lib import config_lib\n'), ((5530, 5559), 'platform.linux_distribution', 'platform.linux_distribution', ([], {}), '()\n', (5557, 5559), False, 'import platform\n')] |
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for recsim.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import test_util
from environments.recommenders import recsim_wrapper
from recsim.environments import interest_exploration
class RecommenderTest(absltest.TestCase):
def test_interest_exploration_can_run(self):
env_config = {
'num_candidates': 5,
'slate_size': 2,
'resample_documents': False,
'seed': 100,
}
params = recsim_wrapper.Params(
recsim_env=interest_exploration.create_environment(env_config))
env = recsim_wrapper.RecsimWrapper(params)
test_util.run_test_simulation(env=env, stackelberg=True)
def test_interest_exploration_can_run_with_resampling(self):
env_config = {
'num_candidates': 5,
'slate_size': 2,
'resample_documents': True,
'seed': 100,
}
params = recsim_wrapper.Params(
recsim_env=interest_exploration.create_environment(env_config))
env = recsim_wrapper.RecsimWrapper(params)
test_util.run_test_simulation(env=env, stackelberg=True)
if __name__ == '__main__':
absltest.main()
| [
"recsim.environments.interest_exploration.create_environment",
"environments.recommenders.recsim_wrapper.RecsimWrapper",
"absl.testing.absltest.main",
"test_util.run_test_simulation"
]
| [((1814, 1829), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (1827, 1829), False, 'from absl.testing import absltest\n'), ((1269, 1305), 'environments.recommenders.recsim_wrapper.RecsimWrapper', 'recsim_wrapper.RecsimWrapper', (['params'], {}), '(params)\n', (1297, 1305), False, 'from environments.recommenders import recsim_wrapper\n'), ((1310, 1366), 'test_util.run_test_simulation', 'test_util.run_test_simulation', ([], {'env': 'env', 'stackelberg': '(True)'}), '(env=env, stackelberg=True)\n', (1339, 1366), False, 'import test_util\n'), ((1685, 1721), 'environments.recommenders.recsim_wrapper.RecsimWrapper', 'recsim_wrapper.RecsimWrapper', (['params'], {}), '(params)\n', (1713, 1721), False, 'from environments.recommenders import recsim_wrapper\n'), ((1726, 1782), 'test_util.run_test_simulation', 'test_util.run_test_simulation', ([], {'env': 'env', 'stackelberg': '(True)'}), '(env=env, stackelberg=True)\n', (1755, 1782), False, 'import test_util\n'), ((1206, 1257), 'recsim.environments.interest_exploration.create_environment', 'interest_exploration.create_environment', (['env_config'], {}), '(env_config)\n', (1245, 1257), False, 'from recsim.environments import interest_exploration\n'), ((1622, 1673), 'recsim.environments.interest_exploration.create_environment', 'interest_exploration.create_environment', (['env_config'], {}), '(env_config)\n', (1661, 1673), False, 'from recsim.environments import interest_exploration\n')] |
import argparse
import csv
import os
from moss_client.core import submit_and_dl, parse_moss_reports
data_folder = 'data'
def handle_input(user_id, base_folder, parse, only_parse, join_file, batch):
global data_folder
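    # Resolve the data folder relative to this script's location rather than the caller's working directory.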
abs_path = os.path.abspath(os.path.dirname(__file__))
root_data_folder = os.path.join(abs_path, data_folder)
if not os.path.exists(root_data_folder):
os.makedirs(root_data_folder)
report_links_file = os.path.join(root_data_folder, 'links_to_moss_reports.html')
report_csv_file = os.path.join(root_data_folder, 'moss_report.csv')
if not os.path.isabs(base_folder):
base_folder = os.path.join(abs_path, base_folder)
if len(join_file) > 0:
expected_keys = ["SC_Filepath", "Stackoverflow_Links"]
with open(join_file, mode='r', encoding='utf-8') as csv_file:
csv_reader = csv.DictReader(csv_file)
actual_keys = csv_reader.fieldnames
if expected_keys[0] != actual_keys[0] or expected_keys[1] != actual_keys[1]:
print("Error: Unexpected Headers! SC_Filepath and Stackoverflow_Links are required!")
return -1
if not only_parse:
submit_and_dl(user_id, base_folder, report_links_file, batch)
if parse or only_parse:
print("Parsing the moss reports...")
parse_moss_reports(report_links_file, report_csv_file, join_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="MOSS CLI client for submitting java files to the service and downloading the report from the "
"service locally. Will go through the sub folders of the given folder and submit the java files "
"for plagiarism checks and download the reports locally, creating a linking file in the process")
parser.add_argument('user_id', metavar='U', nargs=1, help="Your user-id for the MOSS service.")
parser.add_argument('folder', metavar='F', nargs=1, help="The folder whose contents you want to submit.")
parser.add_argument('-p', '--parse', action='store_true', help="Parses the moss reports into a csv file.")
parser.add_argument('-o', '--only-parse', action='store_true',
help="Only parses the local moss reports and does not submit files and download the reports. "
"Requires the reports and the links_to_reports html file created normally by this app.")
parser.add_argument('-j', '--join-file', nargs=1, default=[""],
help="When the parse or only-parse option is given, joins the parsed data with the parsed data.")
parser.add_argument('-b', '--batch-mode', action='store_true',
help="Only submits a 100 folders to the Moss Service, also looks for already processed folders so "
"that it does not submit those again.")
args = parser.parse_args()
handle_input(args.user_id[0], args.folder[0], args.parse, args.only_parse, args.join_file[0], args.batch_mode)
| [
"os.path.exists",
"csv.DictReader",
"os.path.isabs",
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"os.path.dirname",
"moss_client.core.submit_and_dl",
"moss_client.core.parse_moss_reports"
]
| [((305, 340), 'os.path.join', 'os.path.join', (['abs_path', 'data_folder'], {}), '(abs_path, data_folder)\n', (317, 340), False, 'import os\n'), ((448, 508), 'os.path.join', 'os.path.join', (['root_data_folder', '"""links_to_moss_reports.html"""'], {}), "(root_data_folder, 'links_to_moss_reports.html')\n", (460, 508), False, 'import os\n'), ((531, 580), 'os.path.join', 'os.path.join', (['root_data_folder', '"""moss_report.csv"""'], {}), "(root_data_folder, 'moss_report.csv')\n", (543, 580), False, 'import os\n'), ((1436, 1767), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MOSS CLI client for submitting java files to the service and downloading the report from the service locally. Will go through the sub folders of the given folder and submit the java files for plagiarism checks and download the reports locally, creating a linking file in the process"""'}), "(description=\n 'MOSS CLI client for submitting java files to the service and downloading the report from the service locally. Will go through the sub folders of the given folder and submit the java files for plagiarism checks and download the reports locally, creating a linking file in the process'\n )\n", (1459, 1767), False, 'import argparse\n'), ((255, 280), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (270, 280), False, 'import os\n'), ((352, 384), 'os.path.exists', 'os.path.exists', (['root_data_folder'], {}), '(root_data_folder)\n', (366, 384), False, 'import os\n'), ((394, 423), 'os.makedirs', 'os.makedirs', (['root_data_folder'], {}), '(root_data_folder)\n', (405, 423), False, 'import os\n'), ((592, 618), 'os.path.isabs', 'os.path.isabs', (['base_folder'], {}), '(base_folder)\n', (605, 618), False, 'import os\n'), ((642, 677), 'os.path.join', 'os.path.join', (['abs_path', 'base_folder'], {}), '(abs_path, base_folder)\n', (654, 677), False, 'import os\n'), ((1185, 1246), 'moss_client.core.submit_and_dl', 'submit_and_dl', (['user_id', 'base_folder', 'report_links_file', 'batch'], {}), '(user_id, base_folder, report_links_file, batch)\n', (1198, 1246), False, 'from moss_client.core import submit_and_dl, parse_moss_reports\n'), ((1328, 1393), 'moss_client.core.parse_moss_reports', 'parse_moss_reports', (['report_links_file', 'report_csv_file', 'join_file'], {}), '(report_links_file, report_csv_file, join_file)\n', (1346, 1393), False, 'from moss_client.core import submit_and_dl, parse_moss_reports\n'), ((864, 888), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (878, 888), False, 'import csv\n')] |
#!/usr/bin/python
# pylint: disable=W0223
"""
Get a list of teams
"""
from html.parser import HTMLParser
import requests
class ChkTeams(HTMLParser):
"""
Extract team names from page
"""
def __init__(self):
HTMLParser.__init__(self)
self.retval = []
def handle_starttag(self, tag, attrs):
for apt in attrs:
if apt[0] == 'title':
if apt[1] != "ESPN Search":
self.retval.append(apt[1])
DATALOC = "http://www.espn.com/mens-college-basketball/tournament/bracket"
def check_teams():
"""
Extract a list of teams (schools)
"""
req = requests.get(DATALOC)
parser = ChkTeams()
parser.feed(req.text)
retv = parser.retval
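    # The first eight titles collected are not school names, so drop them.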
return retv[8:]
def make_team_list():
"""
Call check_teams and stick result in text file
"""
listv = check_teams()
with open('teams.txt', 'w') as ofile:
for team in listv:
ofile.write(team + '\n')
if __name__ == '__main__':
make_team_list()
| [
"html.parser.HTMLParser.__init__",
"requests.get"
]
| [((640, 661), 'requests.get', 'requests.get', (['DATALOC'], {}), '(DATALOC)\n', (652, 661), False, 'import requests\n'), ((232, 257), 'html.parser.HTMLParser.__init__', 'HTMLParser.__init__', (['self'], {}), '(self)\n', (251, 257), False, 'from html.parser import HTMLParser\n')] |
import frappe
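# Both helpers follow frappe's link-field query signature: (doctype, txt, searchfield, start, page_len, filters).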
@frappe.whitelist()
def filt_itemby_supplier(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""Select parent from `tabItem Supplier` where supplier= %s""",(filters.get("supplier")));
@frappe.whitelist()
def filteritem(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select item_code, item_name, item_group, volume, item_type,stock_uom from `tabItem`"""); | [
"frappe.whitelist",
"frappe.db.sql"
]
| [((17, 35), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (33, 35), False, 'import frappe\n'), ((235, 253), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (251, 253), False, 'import frappe\n'), ((334, 444), 'frappe.db.sql', 'frappe.db.sql', (['"""select item_code, item_name, item_group, volume, item_type,stock_uom from `tabItem`"""'], {}), "(\n 'select item_code, item_name, item_group, volume, item_type,stock_uom from `tabItem`'\n )\n", (347, 444), False, 'import frappe\n')] |
import os
import sys
import unittest
# Set Python search path to the parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.config import *
class TestLibConfig(unittest.TestCase):
def test_config_noconfigfile(self):
config = BeaconConfigParser('not_exist.cfg')
with self.assertRaises(ConfigParser.NoSectionError):
config.getpath('Test', 'dbdir')
def test_config_default(self):
import os
os.environ['HOME'] = 'notexist'
config = BeaconConfigParser()
with self.assertRaises(ConfigParser.NoSectionError):
config.get('Signal', 'samplerate')
def test_config_items(self):
config = BeaconConfigParser('test_config.cfg')
self.assertEqual(config.get('Test', 'dbdir'), 'nodb')
self.assertEqual(config.getpath('Test', 'dbdir'), 'nodb')
self.assertEqual(config.getint('Signal', 'samplerate'), 16000)
if __name__ == "__main__":
unittest.main(buffer=True)
| [
"unittest.main",
"os.path.dirname"
]
| [((974, 1000), 'unittest.main', 'unittest.main', ([], {'buffer': '(True)'}), '(buffer=True)\n', (987, 1000), False, 'import unittest\n'), ((116, 141), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (131, 141), False, 'import os\n')] |
from kivy.uix.screenmanager import ScreenManager
from kivy.uix.boxlayout import BoxLayout
from kivy.lang.builder import Builder
from kivy.animation import Animation
from kivy.core.window import Window
from kivymd.app import MDApp
import kivymd
import kivy
print(
)
def version():
kivy.require('2.0.0')
print(
) | [
"kivy.require"
]
| [((287, 308), 'kivy.require', 'kivy.require', (['"""2.0.0"""'], {}), "('2.0.0')\n", (299, 308), False, 'import kivy\n')] |
import functools
import itertools
import numbers
from ..backend_object import BackendObject
from ..annotation import Annotation
def normalize_types_two_args(f):
@functools.wraps(f)
def normalizer(self, region, o):
"""
Convert any object to an object that we can process.
"""
if isinstance(o, Base):
raise ClaripyValueError("BoolResult can't handle AST objects directly")
if not isinstance(o, StridedInterval):
raise ClaripyVSAOperationError('Unsupported operand type %s' % type(o))
return f(self, region, o)
return normalizer
def normalize_types_one_arg(f):
@functools.wraps(f)
def normalizer(self, o):
"""
Convert any object to an object that we can process.
"""
if isinstance(o, Base):
raise ClaripyValueError("BoolResult can't handle AST objects directly")
return f(self, o)
return normalizer
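# Monotonic counter used to generate unique default names ('VS_<n>') for ValueSet objects.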
vs_id_ctr = itertools.count()
class RegionAnnotation(Annotation):
"""
Use RegionAnnotation to annotate ASTs. Normally, an AST annotated by RegionAnnotations is treated as a ValueSet.
Note that Annotation objects are immutable. Do not change properties of an Annotation object without creating a new
one.
"""
def __init__(self, region_id, region_base_addr, offset):
self.region_id = region_id
self.region_base_addr = region_base_addr
self.offset = offset
# Do necessary conversion here
if isinstance(self.region_base_addr, Base):
self.region_base_addr = self.region_base_addr._model_vsa
if isinstance(self.offset, Base):
self.offset = self.offset._model_vsa
@property
def eliminatable(self):
"""
A Region annotation is not eliminatable in simplifications.
:return: False
:rtype: bool
"""
return False
@property
def relocatable(self):
"""
A Region annotation is not relocatable in simplifications.
:return: False
:rtype: bool
"""
return False
#
# Public methods
#
def relocate(self, src, dst):
"""
Override Annotation.relocate().
:param src: The old AST
:param dst: The new AST, as the result of a simplification
:return: The new annotation that should be applied on the new AST
"""
raise ClaripyVSAError('RegionAnnotation is not relocatable')
#
# Overriding base methods
#
def __hash__(self):
return hash((self.region_id, self.region_base_addr, hash(self.offset)))
def __repr__(self):
return "<RegionAnnotation %s:%#08x>" % (self.region_id, self.offset)
class ValueSet(BackendObject):
"""
ValueSet is a mapping between memory regions and corresponding offsets.
"""
def __init__(self, name=None, region=None, region_base_addr=None, bits=None, val=None):
"""
Constructor.
:param str name: Name of this ValueSet object. Only for debugging purposes.
:param str region: Region ID.
:param int region_base_addr: Base address of the region.
:param int bits: Size of the ValueSet.
:param val: an initial offset
"""
self._name = 'VS_%d' % next(vs_id_ctr) if name is None else name
if bits is None:
raise ClaripyVSAError('bits must be specified when creating a ValueSet.')
self._bits = bits
self._si = StridedInterval.empty(bits)
self._regions = {}
self._region_base_addrs = {}
self._reversed = False
# Shortcuts for initialization
# May not be useful though...
if region is not None and region_base_addr is not None and val is not None:
if isinstance(region_base_addr, numbers.Number):
# Convert it to a StridedInterval
region_base_addr = StridedInterval(bits=self._bits, stride=1,
lower_bound=region_base_addr,
upper_bound=region_base_addr)
if isinstance(val, numbers.Number):
val = StridedInterval(bits=bits, stride=0, lower_bound=val, upper_bound=val)
if isinstance(val, StridedInterval):
self._set_si(region, region_base_addr, val)
else:
raise ClaripyVSAError("Unsupported type '%s' for argument 'val'" % type(val))
else:
if region is not None or val is not None:
raise ClaripyVSAError("You must specify 'region' and 'val' at the same time.")
#
# Properties
#
@property
def name(self):
return self._name
@property
def bits(self):
return self._bits
@property
def regions(self):
return self._regions
@property
def reversed(self):
return self._reversed
@property
def unique(self):
        return len(self.regions) == 1 and next(iter(self.regions.values())).unique
@property
def cardinality(self):
card = 0
for region in self._regions:
card += self._regions[region].cardinality
return card
@property
def is_empty(self):
return len(self._regions) == 0
@property
def valueset(self):
return self
#
# Private methods
#
def _set_si(self, region, region_base_addr, si):
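        # Normalize int arguments to StridedIntervals, record the per-region SI and base address, and fold region_base_addr + si into the combined SI.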
if isinstance(si, numbers.Number):
si = StridedInterval(bits=self.bits, stride=0, lower_bound=si, upper_bound=si)
if isinstance(region_base_addr, numbers.Number):
region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr,
upper_bound=region_base_addr
)
if not isinstance(si, StridedInterval):
raise ClaripyVSAOperationError('Unsupported type %s for si' % type(si))
self._regions[region] = si
self._region_base_addrs[region] = region_base_addr
self._si = self._si.union(region_base_addr + si)
def _merge_si(self, region, region_base_addr, si):
if isinstance(region_base_addr, numbers.Number):
region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr,
upper_bound=region_base_addr
)
if region not in self._regions:
self._set_si(region, region_base_addr, si)
else:
self._regions[region] = self._regions[region].union(si)
self._region_base_addrs[region] = self._region_base_addrs[region].union(region_base_addr)
self._si = self._si.union(region_base_addr + si)
#
# Public methods
#
@staticmethod
def empty(bits):
return ValueSet(bits=bits)
def items(self):
return self._regions.items()
def size(self):
return len(self)
def copy(self):
"""
Make a copy of self and return.
:return: A new ValueSet object.
:rtype: ValueSet
"""
vs = ValueSet(bits=self.bits)
vs._regions = self._regions.copy()
vs._region_base_addrs = self._region_base_addrs.copy()
vs._reversed = self._reversed
vs._si = self._si.copy()
return vs
def get_si(self, region):
if region in self._regions:
return self._regions[region]
# TODO: Should we return a None, or an empty SI instead?
return None
def stridedinterval(self):
return self._si
def apply_annotation(self, annotation):
"""
Apply a new annotation onto self, and return a new ValueSet object.
:param RegionAnnotation annotation: The annotation to apply.
:return: A new ValueSet object
:rtype: ValueSet
"""
vs = self.copy()
vs._merge_si(annotation.region_id, annotation.region_base_addr, annotation.offset)
return vs
def __repr__(self):
s = ""
for region, si in self._regions.items():
s = "%s: %s" % (region, si)
return "(" + s + ")"
def __len__(self):
return self._bits
def __hash__(self):
return hash(tuple((r, hash(self._regions[r])) for r in self._regions))
#
# Arithmetic operations
#
@normalize_types_one_arg
def __add__(self, other):
"""
Binary operation: addition
Note that even if "other" is a ValueSet object. we still treat it as a StridedInterval. Adding two ValueSets
together does not make sense (which is essentially adding two pointers together).
:param StridedInterval other: The other operand.
:return: A new ValueSet object
:rtype: ValueSet
"""
new_vs = ValueSet(bits=self.bits)
# Call __add__ on self._si
new_vs._si = self._si.__add__(other)
for region in self._regions:
new_vs._regions[region] = self._regions[region] + other
return new_vs
@normalize_types_one_arg
def __radd__(self, other):
return self.__add__(other)
@normalize_types_one_arg
def __sub__(self, other):
"""
Binary operation: subtraction
:param other: The other operand
:return: A StridedInterval or a ValueSet.
"""
deltas = [ ]
# TODO: Handle more cases
if isinstance(other, ValueSet):
# A subtraction between two ValueSets produces a StridedInterval
if self.regions.keys() == other.regions.keys():
for region in self._regions:
deltas.append(self._regions[region] - other._regions[region])
else:
# TODO: raise the proper exception here
raise NotImplementedError()
delta = StridedInterval.empty(self.bits)
for d in deltas:
delta = delta.union(d)
return delta
else:
# A subtraction between a ValueSet and a StridedInterval produces another ValueSet
new_vs = self.copy()
# Call __sub__ on the base class
new_vs._si = self._si.__sub__(other)
for region, si in new_vs._regions.items():
new_vs._regions[region] = si - other
return new_vs
@normalize_types_one_arg
def __and__(self, other):
"""
Binary operation: and
Note that even if `other` is a ValueSet object, it will be treated as a StridedInterval as well. Doing & between
        two pointers that are not the same does not make sense.
:param other: The other operand
:return: A ValueSet as the result
:rtype: ValueSet
"""
if type(other) is ValueSet:
            # The only case where calling & between two pointers makes sense
if self.identical(other):
return self.copy()
if BoolResult.is_true(other == 0):
# Corner case: a & 0 = 0
return StridedInterval(bits=self.bits, stride=0, lower_bound=0, upper_bound=0)
if BoolResult.is_true(other < 0x100):
# Special case - sometimes (addr & mask) is used for testing whether the address is aligned or not
# We return a StridedInterval instead
ret = None
for region, si in self._regions.items():
r = si.__and__(other)
ret = r if ret is None else ret.union(r)
return ret
else:
# We should return a ValueSet here
new_vs = self.copy()
for region, si in self._regions.items():
r = si.__and__(other)
new_vs._regions[region] = r
return new_vs
def __eq__(self, other):
"""
Binary operation: ==
:param other: The other operand
:return: True/False/Maybe
"""
if isinstance(other, ValueSet):
same = False
different = False
for region, si in other.regions.items():
if region in self.regions:
comp_ret = self.regions[region] == si
if BoolResult.has_true(comp_ret):
same = True
if BoolResult.has_false(comp_ret):
different = True
else:
different = True
if same and not different:
return TrueResult()
if same and different:
return MaybeResult()
return FalseResult()
elif isinstance(other, StridedInterval):
if 'global' in self.regions:
return self.regions['global'] == other
else:
return FalseResult()
else:
return FalseResult()
def __ne__(self, other):
"""
Binary operation: ==
:param other: The other operand
:return: True/False/Maybe
"""
return ~ (self == other)
#
# Backend operations
#
def eval(self, n, signed=False):
if signed:
# How are you going to deal with a negative pointer?
raise ClaripyVSAOperationError('`signed` cannot be True when calling ValueSet.eval().')
results = []
for _, si in self._regions.items():
if len(results) < n:
results.extend(si.eval(n))
return results
@property
def min(self):
"""
The minimum integer value of a value-set. It is only defined when there is exactly one region.
        :return: An integer that represents the minimum integer value of this value-set.
:rtype: int
"""
if len(self.regions) != 1:
raise ClaripyVSAOperationError("'min()' onlly works on single-region value-sets.")
return self.get_si(next(iter(self.regions))).min
@property
def max(self):
"""
The maximum integer value of a value-set. It is only defined when there is exactly one region.
        :return: An integer that represents the maximum integer value of this value-set.
:rtype: int
"""
if len(self.regions) != 1:
raise ClaripyVSAOperationError("'max()' onlly works on single-region value-sets.")
return self.get_si(next(iter(self.regions))).max
def reverse(self):
# TODO: obviously valueset.reverse is not properly implemented. I'm disabling the old annoying output line for
# TODO: now. I will implement the proper reversing support soon.
vs = self.copy()
vs._reversed = not vs._reversed
return vs
def extract(self, high_bit, low_bit):
"""
Operation extract
- A cheap hack is implemented: a copy of self is returned if (high_bit - low_bit + 1 == self.bits), which is a
ValueSet instance. Otherwise a StridedInterval is returned.
:param high_bit:
:param low_bit:
:return: A ValueSet or a StridedInterval
"""
if high_bit - low_bit + 1 == self.bits:
return self.copy()
if ('global' in self._regions and len(self._regions.keys()) > 1) or \
len(self._regions.keys()) > 0:
si_ret = StridedInterval.top(high_bit - low_bit + 1)
else:
if 'global' in self._regions:
si = self._regions['global']
si_ret = si.extract(high_bit, low_bit)
else:
si_ret = StridedInterval.empty(high_bit - low_bit + 1)
return si_ret
def concat(self, b):
new_vs = ValueSet(bits=self.bits + b.bits)
# TODO: This logic is obviously flawed. Correct it later :-(
if isinstance(b, StridedInterval):
for region, si in self._regions.items():
new_vs._set_si(region, self._region_base_addrs[region], si.concat(b))
elif isinstance(b, ValueSet):
for region, si in self._regions.items():
new_vs._set_si(region, self._region_base_addrs[region], si.concat(b.get_si(region)))
else:
raise ClaripyVSAOperationError('ValueSet.concat() got an unsupported operand %s (type %s)' % (b, type(b)))
return new_vs
@normalize_types_one_arg
def union(self, b):
merged_vs = self.copy()
if type(b) is ValueSet:
for region, si in b.regions.items():
if region not in merged_vs._regions:
merged_vs._regions[region] = si
else:
merged_vs._regions[region] = merged_vs._regions[region].union(si)
merged_vs._si = merged_vs._si.union(b._si)
else:
for region, si in merged_vs._regions.items():
merged_vs._regions[region] = merged_vs._regions[region].union(b)
merged_vs._si = merged_vs._si.union(b)
return merged_vs
@normalize_types_one_arg
def widen(self, b):
merged_vs = self.copy()
if isinstance(b, ValueSet):
for region, si in b.regions.items():
if region not in merged_vs.regions:
merged_vs.regions[region] = si
else:
merged_vs.regions[region] = merged_vs.regions[region].widen(si)
merged_vs._si = merged_vs._si.widen(b._si)
else:
for region in merged_vs._regions:
merged_vs._regions[region] = merged_vs._regions[region].widen(b)
merged_vs._si = merged_vs._si.widen(b)
return merged_vs
@normalize_types_one_arg
def intersection(self, b):
vs = self.copy()
if isinstance(b, ValueSet):
for region, si in b.regions.items():
if region not in vs.regions:
pass
else:
vs.regions[region] = vs.regions[region].intersection(si)
if vs.regions[region].is_empty:
del vs.regions[region]
vs._si = vs._si.intersection(b._si)
else:
for region in self._regions:
vs.regions[region] = vs.regions[region].intersection(b)
if vs.regions[region].is_empty:
del vs.regions[region]
vs._si = vs._si.intersection(b)
return vs
def identical(self, o):
"""
Used to make exact comparisons between two ValueSets.
:param o: The other ValueSet to compare with.
        :return: True if they are exactly the same, False otherwise.
"""
if self._reversed != o._reversed:
return False
for region, si in self.regions.items():
if region in o.regions:
o_si = o.regions[region]
if not si.identical(o_si):
return False
else:
return False
return True
from ..ast.base import Base
from .strided_interval import StridedInterval
from .bool_result import BoolResult, TrueResult, FalseResult, MaybeResult
from .errors import ClaripyVSAOperationError, ClaripyVSAError
from ..errors import ClaripyValueError
| [
"itertools.count",
"functools.wraps"
]
| [((965, 982), 'itertools.count', 'itertools.count', ([], {}), '()\n', (980, 982), False, 'import itertools\n'), ((168, 186), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (183, 186), False, 'import functools\n'), ((653, 671), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (668, 671), False, 'import functools\n')] |
import logging
from episodes import find_updates, db, count_all
from logging import error as logi
from flask import Flask, jsonify, request
def create_app(config, debug=False, testing=False, config_overrides=None):
app = Flask(__name__)
app.config.from_object(config)
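    # Keep non-ASCII characters intact in JSON responses instead of escaping them.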
app.config['JSON_AS_ASCII'] = False
app.debug = debug
app.testing = testing
if config_overrides:
app.config.update(config_overrides)
# Configure logging
if not app.testing:
logging.basicConfig(level=logging.INFO)
@app.before_request
def before_request():
db.connect()
@app.after_request
def after_request(response):
db.close()
return response
@app.route('/get_new_episodes')
def get_new_episodes():
appengine_request = request.headers.get('X-Appengine-Cron')
if appengine_request == 'true':
from scraper import update_episodes
update_episodes()
return '<h1>Success</h1>'
else:
            return '<h1>This is a cronjob and all the requests should come from appengine.</h1>'
@app.route('/get_updates')
def get_update():
timestamp = request.args.get('timestamp', '')
if timestamp == '':
logi('Default timestamp')
timestamp = 0
else:
            timestamp = int(timestamp)
result = find_updates(timestamp)
return jsonify(result)
@app.route('/')
def welcome():
message = '{}{}{}{}'.format('<h1>Welcome to FardaStationAPI WebService</h1>',
'<p>To get information about the latest episodes of Fardaa Station (by '
'RadioFarda.com) please send a GET request to '
'http://fardastationapi.appspot.com/get_updates URL.</p>',
'<p>A UNIX epoch timestamp can also be passed in as an argument to filter out the '
'episodes before that timestamp. Example: '
'https://fardastationapi.appspot.com/get_updates?timestamp=1512629949</p>',
'<h1>Current number of episodes: {}</h1>'.format(count_all()))
return message
# Add an error handler. This is useful for debugging the live application,
# however, you should disable the output of the exception for production
# applications.
@app.errorhandler(500)
def server_error(e):
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
return app
| [
"logging.basicConfig",
"flask.request.args.get",
"scraper.update_episodes",
"flask.Flask",
"episodes.db.close",
"episodes.db.connect",
"episodes.find_updates",
"episodes.count_all",
"logging.error",
"flask.request.headers.get",
"flask.jsonify"
]
| [((228, 243), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (233, 243), False, 'from flask import Flask, jsonify, request\n'), ((495, 534), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (514, 534), False, 'import logging\n'), ((594, 606), 'episodes.db.connect', 'db.connect', ([], {}), '()\n', (604, 606), False, 'from episodes import find_updates, db, count_all\n'), ((672, 682), 'episodes.db.close', 'db.close', ([], {}), '()\n', (680, 682), False, 'from episodes import find_updates, db, count_all\n'), ((800, 839), 'flask.request.headers.get', 'request.headers.get', (['"""X-Appengine-Cron"""'], {}), "('X-Appengine-Cron')\n", (819, 839), False, 'from flask import Flask, jsonify, request\n'), ((1181, 1214), 'flask.request.args.get', 'request.args.get', (['"""timestamp"""', '""""""'], {}), "('timestamp', '')\n", (1197, 1214), False, 'from flask import Flask, jsonify, request\n'), ((1380, 1403), 'episodes.find_updates', 'find_updates', (['timestamp'], {}), '(timestamp)\n', (1392, 1403), False, 'from episodes import find_updates, db, count_all\n'), ((1420, 1435), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (1427, 1435), False, 'from flask import Flask, jsonify, request\n'), ((940, 957), 'scraper.update_episodes', 'update_episodes', ([], {}), '()\n', (955, 957), False, 'from scraper import update_episodes\n'), ((1256, 1281), 'logging.error', 'logi', (['"""Default timestamp"""'], {}), "('Default timestamp')\n", (1260, 1281), True, 'from logging import error as logi\n'), ((2247, 2258), 'episodes.count_all', 'count_all', ([], {}), '()\n', (2256, 2258), False, 'from episodes import find_updates, db, count_all\n')] |
# -*- coding: utf-8 -*-
# catapult: runs python scripts in already running processes to eliminate the
# python interpreter startup time.
#
# The lexicon for sparv.saldo.annotate and sparv.saldo.compound can be pre-loaded and
# shared between processes. See the variable annotators in handle and start.
#
# Run scripts in the catapult with the c program catalaunch.
from builtins import range, object
from multiprocessing import Process, cpu_count
from decorator import decorator
import logging
import os
import re
import runpy
import socket
import sys
import traceback
import sparv.util as util
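# Number of bytes to read from the client socket per recv() call.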
RECV_LEN = 4096
# Important to preload all modules otherwise processes will need to do
# it upon request, introducing new delays.
#
# These imports uses the __all__ variables in the __init__ files.
from sparv.util import *
from sparv import *
logging.basicConfig(format="%(process)d %(asctime)-15s %(message)s")
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
"""
Splits at every space that is not preceded by a backslash.
"""
splitter = re.compile('(?<!\\\\) ')
def set_last_argument(*values):
"""
Decorates a function f, setting its last argument(s) to the given value(s).
Used for setting the saldo lexicons to sparv.saldo.annotate and
sparv.saldo.compound, and the process "dictionary" to sparv.malt.maltparse.
The decorator module is used to give the same signature and
docstring to the function, which is exploited in sparv.util.run.
"""
@decorator
def inner(f, *args, **kwargs):
args = list(args)
for v in values:
args.pop()
for v in values:
args.append(v)
f(*args, **kwargs)
return inner
def handle(client_sock, verbose, annotators):
"""
Handle a client: parse the arguments, change to the relevant
directory, then run the script. Stdout and stderr are directed
to /dev/null or to the client socket.
"""
def chunk_send(msg):
"""
        Sends a message chunk until it is fully received at the other end
"""
msg = msg.encode(util.UTF8)
while len(msg) > 0:
sent = client_sock.send(msg)
if sent == 0:
raise RuntimeError("socket connection broken")
msg = msg[sent:]
def set_stdout_stderr():
"""
Put stdout and stderr to the client_sock, if verbose.
Returns the clean-up handler.
"""
class Writer(object):
def write(self, msg):
log.debug(msg)
if verbose:
chunk_send(msg)
def flush(self):
pass
orig_stds = sys.stdout, sys.stderr
w = Writer()
sys.stdout = w
sys.stderr = w
def cleanup():
"""
Restores stdout and stderr
"""
sys.stdout = orig_stds[0]
sys.stderr = orig_stds[1]
client_sock.close()
return cleanup
# Receive data
data = b""
new_data = None
# Message is terminated with a lone \
while new_data is None or not new_data.endswith(b'\\'):
new_data = client_sock.recv(RECV_LEN)
log.debug("Received %s", new_data)
data += new_data
if len(new_data) == 0:
log.warning("Received null!")
chunk_send("Error when receiving: got an empty message")
return
# Drop the terminating \
data = data[0:-1]
# Split arguments on spaces, and replace '\ ' to ' ' and \\ to \
args = [arg.replace('\\ ', ' ').replace('\\\\', '\\')
for arg in re.split(splitter, data.decode(util.UTF8))]
log.debug("Args: %s", args)
### PING? ###
if len(args) == 2 and args[1] == "PING":
log.info("Ping requested")
chunk_send("PONG")
return
# If the first argument is -m, the following argument is a module
# name instead of a script name
module_flag = len(args) > 2 and args[1] == '-m'
if module_flag:
args.pop(1)
if len(args) > 1:
# First argument is the pwd of the caller
old_pwd = os.getcwd()
pwd = args.pop(0)
log.info('Running %s', args[0])
log.debug('with arguments: %s', ' '.join(args[1:]))
log.debug('in directory %s', pwd)
# Set stdout and stderr, which returns the cleaup function
cleanup = set_stdout_stderr()
# Run the command
try:
sys.argv = args
os.chdir(pwd)
if module_flag:
annotator = annotators.get(args[0], None)
if not annotator:
# some of the annotators require two arguments
annotator = annotators.get((args[0], args[1]), None)
if annotator:
# skip the first argument now
                        sys.argv = [args[0]]
sys.argv.extend(args[2:])
if annotator:
util.run.main(annotator)
else:
runpy.run_module(args[0], run_name='__main__')
else:
runpy.run_path(args[0], run_name='__main__')
except (ImportError, IOError):
# If file does not exist, send the error message
chunk_send("%s\n" % sys.exc_info()[1])
cleanup()
log.exception("File does not exist")
except:
# Send other errors, and if verbose, send tracebacks
chunk_send("%s\n" % sys.exc_info()[1])
traceback.print_exception(*sys.exc_info())
cleanup()
log.exception("Unknown error")
else:
cleanup()
os.chdir(old_pwd)
# Run the cleanup function if there is one (only used with malt)
annotators.get((args[0], 'cleanup'), lambda: None)()
log.info('Completed %s', args[0])
else:
log.info('Cannot handle %s', data)
chunk_send('Cannot handle %s\n' % data)
def worker(server_socket, verbose, annotators, malt_args=None, swener_args=None):
"""
    Workers listen to the socket server and handle incoming requests.
    Each process starts its own maltparser process, because they are
cheap and cannot serve multiple clients at the same time.
"""
if malt_args:
process_dict = dict(process=None, restart=True)
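        # Shared with the malt annotator via set_last_argument(); start_malt() below (re)starts the parser process when 'restart' is set.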
def start_malt():
if process_dict['process'] is None or process_dict['restart']:
old_process = process_dict['process']
old_process and util.system.kill_process(old_process)
malt_process = malt.maltstart(**malt_args)
if verbose:
log.info('(Re)started malt process: %s', malt_process)
process_dict['process'] = malt_process
annotators['sparv.malt'] = set_last_argument(process_dict)(malt.maltparse)
elif verbose:
log.info("Not restarting malt this time")
start_malt()
annotators['sparv.malt', 'cleanup'] = start_malt
if swener_args:
process_dict = dict(process=None, restart=True)
def start_swener():
if process_dict['process'] is None or process_dict['restart']:
old_process = process_dict['process']
old_process and util.system.kill_process(old_process)
swener_process = swener.swenerstart(**swener_args)
if verbose:
log.info('(Re)started SweNER process: %s', swener_process)
process_dict['process'] = swener_process
annotators['sparv.swener'] = set_last_argument(process_dict)(swener.tag_ne)
elif verbose:
log.info("Not restarting SweNER this time")
start_swener()
annotators['sparv.swener', 'cleanup'] = start_swener
if verbose:
log.info("Worker running!")
while True:
client_sock, addr = server_socket.accept()
try:
handle(client_sock, verbose, annotators)
except:
log.exception('Error in handling code')
traceback.print_exception(*sys.exc_info())
client_sock.close()
def start(socket_path, processes=1, verbose='false',
saldo_model=None, compound_model=None, stats_model=None,
dalin_model=None, swedberg_model=None, blingbring_model=None,
malt_jar=None, malt_model=None, malt_encoding=util.UTF8,
sentiment_model=None, swefn_model=None, swener=False,
swener_encoding=util.UTF8):
"""
Starts a catapult on a socket file, using a number of processes.
    If verbose is false, all stdout and stderr that programs produce is
piped to /dev/null, otherwise it is sent to the client. The
computation is done by the catapult processes, however.
Regardless of what verbose is, client errors should be reported
both in the catapult and to the client.
The saldo model and compound model can be pre-loaded and shared in
memory between processes.
Start processes using catalaunch.
"""
if os.path.exists(socket_path):
log.error('socket %s already exists', socket_path)
exit(1)
verbose = verbose.lower() == 'true'
log.info('Verbose: %s', verbose)
# If processes does not contain an int, set it to the number of processors
try:
processes = int(processes)
except:
processes = cpu_count()
# Start the socket
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_socket.bind(socket_path)
server_socket.listen(processes)
# The dictionary of functions with saved lexica, indexed by module name strings
annotators = {}
# Load Saldo and older lexicons
lexicons = [m for m in [saldo_model, dalin_model, swedberg_model] if m]
if lexicons:
lexicon_dict = {}
for lexicon in lexicons:
lexicon_dict[os.path.basename(lexicon).rstrip(".pickle")] = saldo.SaldoLexicon(lexicon)
annotators['sparv.saldo'] = set_last_argument(lexicon_dict)(saldo.annotate)
if stats_model and compound_model:
annotators['sparv.compound'] = set_last_argument(
compound.SaldoCompLexicon(compound_model),
compound.StatsLexicon(stats_model))(compound.annotate)
elif compound_model:
annotators['sparv.compound_simple'] = set_last_argument(
compound_simple.SaldoLexicon(compound_model))(compound_simple.annotate)
# if blingbring_model:
# annotators['sparv.lexical_classes'] = set_last_argument(
# util.PickledLexicon(blingbring_model))(lexical_classes.annotate_bb_words)
# if swefn_model:
# annotators['sparv.lexical_classes'] = set_last_argument(
# util.PickledLexicon(swefn_model))(lexical_classes.annotate_swefn_words)
if sentiment_model:
annotators['sparv.sentiment'] = set_last_argument(
util.PickledLexicon(sentiment_model))(sentiment.sentiment)
# if models_1700s:
# models = models_1700s.split()
# lexicons = [saldo.SaldoLexicon(lex) for lex in models]
# annotators[('sparv.fsv', '--annotate_fallback')] = set_last_argument(lexicons)(fsv.annotate_fallback)
# annotators[('sparv.fsv', '--annotate_full')] = set_last_argument(lexicons)(fsv.annotate_full)
if verbose:
log.info('Loaded annotators: %s', list(annotators.keys()))
if malt_jar and malt_model:
malt_args = dict(maltjar=malt_jar, model=malt_model,
encoding=malt_encoding, send_empty_sentence=True)
else:
malt_args = None
if swener:
swener_args = dict(stdin="", encoding=swener_encoding, verbose=True)
else:
swener_args = None
# Start processes-1 workers
workers = [Process(target=worker, args=[server_socket, verbose, annotators, malt_args])
for i in range(processes - 1)]
for p in workers:
p.start()
# Additionally, let this thread be worker 0
worker(server_socket, verbose, annotators, malt_args, swener_args)
if __name__ == '__main__':
util.run.main(start)
| [
"logging.basicConfig",
"os.path.exists",
"logging.getLogger",
"sparv.util.run.main",
"socket.socket",
"re.compile",
"multiprocessing.Process",
"multiprocessing.cpu_count",
"os.getcwd",
"os.chdir",
"runpy.run_module",
"builtins.range",
"sparv.util.PickledLexicon",
"sparv.util.system.kill_process",
"sys.exc_info",
"sys.argv.extend",
"os.path.basename",
"runpy.run_path"
]
| [((843, 911), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(process)d %(asctime)-15s %(message)s"""'}), "(format='%(process)d %(asctime)-15s %(message)s')\n", (862, 911), False, 'import logging\n'), ((918, 945), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (935, 945), False, 'import logging\n'), ((1052, 1076), 're.compile', 're.compile', (['"""(?<!\\\\\\\\) """'], {}), "('(?<!\\\\\\\\) ')\n", (1062, 1076), False, 'import re\n'), ((9147, 9174), 'os.path.exists', 'os.path.exists', (['socket_path'], {}), '(socket_path)\n', (9161, 9174), False, 'import os\n'), ((9542, 9591), 'socket.socket', 'socket.socket', (['socket.AF_UNIX', 'socket.SOCK_STREAM'], {}), '(socket.AF_UNIX, socket.SOCK_STREAM)\n', (9555, 9591), False, 'import socket\n'), ((12178, 12198), 'sparv.util.run.main', 'util.run.main', (['start'], {}), '(start)\n', (12191, 12198), True, 'import sparv.util as util\n'), ((4144, 4155), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4153, 4155), False, 'import os\n'), ((5731, 5748), 'os.chdir', 'os.chdir', (['old_pwd'], {}), '(old_pwd)\n', (5739, 5748), False, 'import os\n'), ((11862, 11938), 'multiprocessing.Process', 'Process', ([], {'target': 'worker', 'args': '[server_socket, verbose, annotators, malt_args]'}), '(target=worker, args=[server_socket, verbose, annotators, malt_args])\n', (11869, 11938), False, 'from multiprocessing import Process, cpu_count\n'), ((4511, 4524), 'os.chdir', 'os.chdir', (['pwd'], {}), '(pwd)\n', (4519, 4524), False, 'import os\n'), ((9486, 9497), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (9495, 9497), False, 'from multiprocessing import Process, cpu_count\n'), ((11963, 11983), 'builtins.range', 'range', (['(processes - 1)'], {}), '(processes - 1)\n', (11968, 11983), False, 'from builtins import range, object\n'), ((5167, 5211), 'runpy.run_path', 'runpy.run_path', (['args[0]'], {'run_name': '"""__main__"""'}), "(args[0], run_name='__main__')\n", (5181, 5211), False, 'import runpy\n'), ((10992, 11028), 'sparv.util.PickledLexicon', 'util.PickledLexicon', (['sentiment_model'], {}), '(sentiment_model)\n', (11011, 11028), True, 'import sparv.util as util\n'), ((5019, 5043), 'sparv.util.run.main', 'util.run.main', (['annotator'], {}), '(annotator)\n', (5032, 5043), True, 'import sparv.util as util\n'), ((5086, 5132), 'runpy.run_module', 'runpy.run_module', (['args[0]'], {'run_name': '"""__main__"""'}), "(args[0], run_name='__main__')\n", (5102, 5132), False, 'import runpy\n'), ((6595, 6632), 'sparv.util.system.kill_process', 'util.system.kill_process', (['old_process'], {}), '(old_process)\n', (6619, 6632), True, 'import sparv.util as util\n'), ((7374, 7411), 'sparv.util.system.kill_process', 'util.system.kill_process', (['old_process'], {}), '(old_process)\n', (7398, 7411), True, 'import sparv.util as util\n'), ((4942, 4967), 'sys.argv.extend', 'sys.argv.extend', (['args[2:]'], {}), '(args[2:])\n', (4957, 4967), False, 'import sys\n'), ((5605, 5619), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5617, 5619), False, 'import sys\n'), ((8202, 8216), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8214, 8216), False, 'import sys\n'), ((9983, 10008), 'os.path.basename', 'os.path.basename', (['lexicon'], {}), '(lexicon)\n', (9999, 10008), False, 'import os\n'), ((5344, 5358), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5356, 5358), False, 'import sys\n'), ((5547, 5561), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5559, 5561), False, 'import sys\n')] |
import datetime
def iso_extract_info(string):
"""
Will get all of the info and return it as an array
:param string: ISO formatted string that will be used for extraction
:return: array [year, month, day, military_time_hour, minutes, hours]
:note: every item is an int except for minutes
:note: hours only is there is military_time_hour is greater than 12
"""
elements = []
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
military_time_hours_int = int("".join(characters[11:13]))
minutes_int = "".join(characters[14:16])
hours = 0
elements.append(year_int)
elements.append(month_int)
elements.append(day_int)
elements.append(minutes_int)
if military_time_hours_int > 12:
hours += military_time_hours_int - 12
elements.append(hours)
return elements
# # Testing:
# print("[year, month, day, military_time_hour, minutes, hours]")
# print(iso_extract_info('2019-04-27T16:00:00-04:00'))
# Doesn't use the "iso_extract_info" function
def iso_format_to_regular(string):
"""
    Will take an ISO formatted date string and make it readable
:param string: the iso formatted string
:return: str
"""
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
military_time_hours_int = int("".join(characters[11:13]))
minutes_int = "".join(characters[14:16])
if military_time_hours_int > 12:
hours = military_time_hours_int - 12
final_string = "{month}/{day}/{year} {hour}:{minute}PM".format(
month=month_int, day=day_int, year=year_int, hour=hours, minute=minutes_int)
return final_string
else:
final_string = "{month}/{day}/{year} {hour}:{minute}AM".format(
month=month_int, day=day_int, year=year_int, hour=military_time_hours_int, minute=minutes_int)
return final_string
# Testing:
# print(iso_format_to_regular('2019-04-27T16:00:00-04:00'))
# Doesn't use the "iso_extract_info" function
def fix_time(strange_date):
"""
    Will rearrange the strange date that Google gives and replace it with a normal date string.
:param strange_date: strange time that google gives when an event is marked as "all day"
:return: str
"""
items = strange_date.split("-")
year_int = int(items[0])
month_int = int(items[1])
day_int = int(items[2])
new_str = "{month}/{day}/{year}".format(
month=month_int, day=day_int, year=year_int)
return new_str
# Doesn't use the "iso_extract_info" function
def multiday_checker_STRANGE(start_date, end_date):
"""
    Will check if an event is more than a day long
:param start_date: Strange Google formatted date of the start of the event
:param end_date: Strange Google formatted date of the end of the event
:return: Boolean
"""
    start_year, start_month, start_day = [int(part) for part in start_date.split("-")]
    end_year, end_month, end_day = [int(part) for part in end_date.split("-")]
    # Compare real dates so month and year boundaries are handled correctly.
    start = datetime.date(start_year, start_month, start_day)
    end = datetime.date(end_year, end_month, end_day)
    return (end - start).days > 1
# Testing:
# print(multiday_checker_STRANGE('2019-04-21', '2019-04-22'))
# Doesn't use the "iso_extract_info" function
def STRANGE_string_weekday(string):
"""
Will take a string that is a date formatted in the Google format and find what day of the week it is
:param string: Google formatted string for the date
:return: string
"""
items = string.split("/")
year_int = int(items[2])
month_int = int(items[0])
day_int = int(items[1])
datetime_instance = datetime.date(year_int, month_int, day_int)
week_day_number = datetime_instance.weekday()
if week_day_number == 0:
return "Monday"
elif week_day_number == 1:
return "Tuesday"
elif week_day_number == 2:
return "Wendsday"
elif week_day_number == 3:
return "Thursday"
elif week_day_number == 4:
return "Friday"
elif week_day_number == 5:
return "Saturday"
elif week_day_number == 6:
return "Sunday"
else:
return "Error"
# Testing:
# print(STRANGE_string_weekday("4/27/2019"))
# Doesn't use the "iso_extract_info" function
def ISO_string_weekday(string):
"""
Will take a string that is a date formatted in the ISO format and find what day of the week it is
:param string: ISO formatted string for the date
:return: string
"""
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
datetime_instance = datetime.date(year_int, month_int, day_int)
week_day_number = datetime_instance.weekday()
if week_day_number == 0:
return "Monday"
elif week_day_number == 1:
return "Tuesday"
elif week_day_number == 2:
return "Wendsday"
elif week_day_number == 3:
return "Thursday"
elif week_day_number == 4:
return "Friday"
elif week_day_number == 5:
return "Saturday"
elif week_day_number == 6:
return "Sunday"
else:
return "Error"
# Testing:
# print(ISO_string_weekday('2019-06-28T16:00:00-04:00'))
| [
"datetime.date"
]
| [((3981, 4024), 'datetime.date', 'datetime.date', (['year_int', 'month_int', 'day_int'], {}), '(year_int, month_int, day_int)\n', (3994, 4024), False, 'import datetime\n'), ((5018, 5061), 'datetime.date', 'datetime.date', (['year_int', 'month_int', 'day_int'], {}), '(year_int, month_int, day_int)\n', (5031, 5061), False, 'import datetime\n')] |
from django.urls import path
from issue_template.views import IssueTemplateView
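# Route "<str:owner>/<str:repo>/<str:token_auth>/" to IssueTemplateView;
# the three path segments are passed to the view as string keyword arguments.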
urlpatterns = [
path(
'<str:owner>/<str:repo>/<str:token_auth>/',
IssueTemplateView.as_view()
),
]
| [
"issue_template.views.IssueTemplateView.as_view"
]
| [((168, 195), 'issue_template.views.IssueTemplateView.as_view', 'IssueTemplateView.as_view', ([], {}), '()\n', (193, 195), False, 'from issue_template.views import IssueTemplateView\n')] |
import collections
import nltk
import os
from sklearn import (
datasets, model_selection, feature_extraction, linear_model, naive_bayes,
ensemble
)
def extract_features(corpus):
'''Extract TF-IDF features from corpus'''
sa_stop_words = nltk.corpus.stopwords.words("english")
# words that might invert a sentence's meaning
white_list = [
'what', 'but', 'if', 'because', 'as', 'until', 'against',
'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again',
'further', 'then', 'once', 'here', 'there', 'why', 'how', 'all', 'any',
'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own',
'same', 'so', 'than', 'too', 'can', 'will', 'just', 'don', 'should']
# take these out of the standard NLTK stop word list
sa_stop_words = [sw for sw in sa_stop_words if sw not in white_list]
# vectorize means we turn non-numerical data into an array of numbers
count_vectorizer = feature_extraction.text.CountVectorizer(
lowercase=True, # for demonstration, True by default
tokenizer=nltk.word_tokenize, # use the NLTK tokenizer
        min_df=2,  # minimum document frequency, i.e. the term must appear in at least two documents.
ngram_range=(1, 2),
stop_words=sa_stop_words
)
processed_corpus = count_vectorizer.fit_transform(corpus)
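    # Re-weight the raw counts by inverse document frequency (TF-IDF) so that
    # common words contribute less than rare, discriminative ones.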
processed_corpus = feature_extraction.text.TfidfTransformer().fit_transform(
processed_corpus)
return processed_corpus
data_directory = 'movie_reviews'
movie_sentiment_data = datasets.load_files(data_directory, shuffle=True)
print('{} files loaded.'.format(len(movie_sentiment_data.data)))
print('They contain the following classes: {}.'.format(
movie_sentiment_data.target_names))
movie_tfidf = extract_features(movie_sentiment_data.data)
X_train, X_test, y_train, y_test = model_selection.train_test_split(
movie_tfidf, movie_sentiment_data.target, test_size=0.30, random_state=42)
# similar to nltk.NaiveBayesClassifier.train()
clf1 = linear_model.LogisticRegression()
clf1.fit(X_train, y_train)
print('Logistic Regression performance: {}'.format(clf1.score(X_test, y_test)))
clf2 = linear_model.SGDClassifier()
clf2.fit(X_train, y_train)
print('SGDClassifier performance: {}'.format(clf2.score(X_test, y_test)))
clf3 = naive_bayes.MultinomialNB()
clf3.fit(X_train, y_train)
print('MultinomialNB performance: {}'.format(clf3.score(X_test, y_test)))
clf4 = naive_bayes.BernoulliNB()
clf4.fit(X_train, y_train)
print('BernoulliNB performance: {}'.format(clf4.score(X_test, y_test)))
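# Combine the four fitted classifiers with majority ('hard') voting over their predicted labels.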
voting_model = ensemble.VotingClassifier(
estimators=[('lr', clf1), ('sgd', clf2), ('mnb', clf3), ('bnb', clf4)],
voting='hard')
voting_model.fit(X_train, y_train)
print('Voting classifier performance: {}'.format(
voting_model.score(X_test, y_test)))
| [
"sklearn.feature_extraction.text.TfidfTransformer",
"sklearn.linear_model.SGDClassifier",
"sklearn.ensemble.VotingClassifier",
"nltk.corpus.stopwords.words",
"sklearn.model_selection.train_test_split",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.datasets.load_files",
"sklearn.linear_model.LogisticRegression",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.naive_bayes.BernoulliNB"
]
| [((1548, 1597), 'sklearn.datasets.load_files', 'datasets.load_files', (['data_directory'], {'shuffle': '(True)'}), '(data_directory, shuffle=True)\n', (1567, 1597), False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((1854, 1964), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['movie_tfidf', 'movie_sentiment_data.target'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(movie_tfidf, movie_sentiment_data.target,\n test_size=0.3, random_state=42)\n', (1886, 1964), False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((2022, 2055), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {}), '()\n', (2053, 2055), False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((2171, 2199), 'sklearn.linear_model.SGDClassifier', 'linear_model.SGDClassifier', ([], {}), '()\n', (2197, 2199), False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((2309, 2336), 'sklearn.naive_bayes.MultinomialNB', 'naive_bayes.MultinomialNB', ([], {}), '()\n', (2334, 2336), False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((2446, 2471), 'sklearn.naive_bayes.BernoulliNB', 'naive_bayes.BernoulliNB', ([], {}), '()\n', (2469, 2471), False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((2588, 2704), 'sklearn.ensemble.VotingClassifier', 'ensemble.VotingClassifier', ([], {'estimators': "[('lr', clf1), ('sgd', clf2), ('mnb', clf3), ('bnb', clf4)]", 'voting': '"""hard"""'}), "(estimators=[('lr', clf1), ('sgd', clf2), ('mnb',\n clf3), ('bnb', clf4)], voting='hard')\n", (2613, 2704), False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((255, 293), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (282, 293), False, 'import nltk\n'), ((967, 1113), 'sklearn.feature_extraction.text.CountVectorizer', 'feature_extraction.text.CountVectorizer', ([], {'lowercase': '(True)', 'tokenizer': 'nltk.word_tokenize', 'min_df': '(2)', 'ngram_range': '(1, 2)', 'stop_words': 'sa_stop_words'}), '(lowercase=True, tokenizer=nltk.\n word_tokenize, min_df=2, ngram_range=(1, 2), stop_words=sa_stop_words)\n', (1006, 1113), False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((1377, 1419), 'sklearn.feature_extraction.text.TfidfTransformer', 'feature_extraction.text.TfidfTransformer', ([], {}), '()\n', (1417, 1419), False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n')] |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pickle
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import librosa
import torch
from nemo_text_processing.text_normalization.normalize import Normalizer
from tqdm import tqdm
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.tts.torch.helpers import (
BetaBinomialInterpolator,
beta_binomial_prior_distribution,
general_padding,
)
from nemo.collections.tts.torch.tts_data_types import (
DATA_STR2DATA_CLASS,
MAIN_DATA_TYPES,
VALID_SUPPLEMENTARY_DATA_TYPES,
DurationPrior,
Durations,
Energy,
LMTokens,
LogMel,
Pitch,
SpeakerID,
WithLens,
)
from nemo.collections.tts.torch.tts_tokenizers import BaseTokenizer, EnglishCharsTokenizer, EnglishPhonemesTokenizer
from nemo.core.classes import Dataset
from nemo.utils import logging
class TTSDataset(Dataset):
def __init__(
self,
manifest_filepath: str,
sample_rate: int,
text_tokenizer: Union[BaseTokenizer, Callable[[str], List[int]]],
tokens: Optional[List[str]] = None,
text_normalizer: Optional[Union[Normalizer, Callable[[str], str]]] = None,
text_normalizer_call_args: Optional[Dict] = None,
text_tokenizer_pad_id: Optional[int] = None,
sup_data_types: Optional[List[str]] = None,
sup_data_path: Optional[Union[Path, str]] = None,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
ignore_file: Optional[str] = None,
trim: bool = False,
n_fft=1024,
win_length=None,
hop_length=None,
window="hann",
n_mels=80,
lowfreq=0,
highfreq=None,
**kwargs,
):
"""Dataset that loads main data types (audio and text) and specified supplementary data types (e.g. log mel, durations, pitch).
        Most supplementary data types will be computed on the fly and saved in sup_data_path if they do not already exist there.
Arguments for supplementary data should be also specified in this class and they will be used from kwargs (see keyword args section).
Args:
manifest_filepath (str, Path, List[str, Path]): Path(s) to the .json manifests containing information on the
dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid
json. Each line should contain the following:
"audio_filepath": <PATH_TO_WAV>
"mel_filepath": <PATH_TO_LOG_MEL_PT> (Optional)
"duration": <Duration of audio clip in seconds> (Optional)
"text": <THE_TRANSCRIPT> (Optional)
sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to.
text_tokenizer (Optional[Union[BaseTokenizer, Callable[[str], List[int]]]]): BaseTokenizer or callable which represents text tokenizer.
tokens (Optional[List[str]]): Tokens from text_tokenizer. Should be specified if text_tokenizer is not BaseTokenizer.
text_normalizer (Optional[Union[Normalizer, Callable[[str], str]]]): Normalizer or callable which represents text normalizer.
text_normalizer_call_args (Optional[Dict]): Additional arguments for text_normalizer function.
text_tokenizer_pad_id (Optional[int]): Index of padding. Should be specified if text_tokenizer is not BaseTokenizer.
sup_data_types (Optional[List[str]]): List of supplementary data types.
sup_data_path (Optional[Union[Path, str]]): A folder that contains or will contain supplementary data (e.g. pitch).
max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
ignore_file (Optional[str, Path]): The location of a pickle-saved list of audio_ids (the stem of the audio
files) that will be pruned prior to training. Defaults to None which does not prune.
trim (Optional[bool]): Whether to apply librosa.effects.trim to the audio file. Defaults to False.
n_fft (Optional[int]): The number of fft samples. Defaults to 1024
win_length (Optional[int]): The length of the stft windows. Defaults to None which uses n_fft.
            hop_length (Optional[int]): The hop length between fft computations. Defaults to None which uses n_fft//4.
            window (Optional[str]): One of 'hann', 'hamming', 'blackman', 'bartlett', 'none', which corresponds to the
                equivalent torch window function.
n_mels (Optional[int]): The number of mel filters. Defaults to 80.
lowfreq (Optional[int]): The lowfreq input to the mel filter calculation. Defaults to 0.
highfreq (Optional[int]): The highfreq input to the mel filter calculation. Defaults to None.
Keyword Args:
durs_file (Optional[str]): String path to pickled durations location.
            durs_type (Optional[str]): Type of durations. Currently only "aligner-based" is supported.
use_beta_binomial_interpolator (Optional[bool]): Whether to use beta-binomial interpolator. Defaults to False.
pitch_fmin (Optional[float]): The fmin input to librosa.pyin. Defaults to librosa.note_to_hz('C2').
pitch_fmax (Optional[float]): The fmax input to librosa.pyin. Defaults to librosa.note_to_hz('C7').
pitch_avg (Optional[float]): The mean that we use to normalize the pitch.
pitch_std (Optional[float]): The std that we use to normalize the pitch.
pitch_norm (Optional[bool]): Whether to normalize pitch (via pitch_avg and pitch_std) or not.
"""
super().__init__()
self.text_normalizer = text_normalizer
self.text_normalizer_call = (
self.text_normalizer.normalize if isinstance(self.text_normalizer, Normalizer) else self.text_normalizer
)
self.text_normalizer_call_args = text_normalizer_call_args if text_normalizer_call_args is not None else {}
self.text_tokenizer = text_tokenizer
if isinstance(self.text_tokenizer, BaseTokenizer):
self.text_tokenizer_pad_id = text_tokenizer.pad
self.tokens = text_tokenizer.tokens
else:
if text_tokenizer_pad_id is None:
raise ValueError(f"text_tokenizer_pad_id must be specified if text_tokenizer is not BaseTokenizer")
if tokens is None:
raise ValueError(f"tokens must be specified if text_tokenizer is not BaseTokenizer")
self.text_tokenizer_pad_id = text_tokenizer_pad_id
self.tokens = tokens
if isinstance(manifest_filepath, str):
manifest_filepath = [manifest_filepath]
self.manifest_filepath = manifest_filepath
if sup_data_path is not None:
Path(sup_data_path).mkdir(parents=True, exist_ok=True)
self.sup_data_path = sup_data_path
self.sup_data_types = (
[DATA_STR2DATA_CLASS[d_as_str] for d_as_str in sup_data_types] if sup_data_types is not None else []
)
self.sup_data_types_set = set(self.sup_data_types)
self.data = []
audio_files = []
total_duration = 0
for manifest_file in self.manifest_filepath:
with open(Path(manifest_file).expanduser(), 'r') as f:
logging.info(f"Loading dataset from {manifest_file}.")
for line in tqdm(f):
item = json.loads(line)
file_info = {
"audio_filepath": item["audio_filepath"],
"mel_filepath": item["mel_filepath"] if "mel_filepath" in item else None,
"duration": item["duration"] if "duration" in item else None,
"text_tokens": None,
"speaker_id": item["speaker"] if "speaker" in item else None,
}
if "text" in item:
text = item["text"]
if self.text_normalizer is not None:
text = self.text_normalizer_call(text, **self.text_normalizer_call_args)
text_tokens = self.text_tokenizer(text)
file_info["raw_text"] = item["text"]
file_info["text_tokens"] = text_tokens
audio_files.append(file_info)
if file_info["duration"] is None:
logging.info(
"Not all audio files have duration information. Duration logging will be disabled."
)
total_duration = None
if total_duration is not None:
total_duration += item["duration"]
logging.info(f"Loaded dataset with {len(audio_files)} files.")
if total_duration is not None:
logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.")
if ignore_file:
logging.info(f"using {ignore_file} to prune dataset.")
with open(Path(ignore_file).expanduser(), "rb") as f:
wavs_to_ignore = set(pickle.load(f))
pruned_duration = 0 if total_duration is not None else None
pruned_items = 0
for item in audio_files:
audio_path = item['audio_filepath']
audio_id = Path(audio_path).stem
# Prune data according to min/max_duration & the ignore file
if total_duration is not None:
if (min_duration and item["duration"] < min_duration) or (
max_duration and item["duration"] > max_duration
):
pruned_duration += item["duration"]
pruned_items += 1
continue
if ignore_file and (audio_id in wavs_to_ignore):
pruned_items += 1
pruned_duration += item["duration"]
wavs_to_ignore.remove(audio_id)
continue
self.data.append(item)
logging.info(f"Pruned {pruned_items} files. Final dataset contains {len(self.data)} files")
if pruned_duration is not None:
logging.info(
f"Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains "
f"{(total_duration - pruned_duration) / 3600:.2f} hours."
)
self.sample_rate = sample_rate
self.featurizer = WaveformFeaturizer(sample_rate=self.sample_rate)
self.trim = trim
self.n_fft = n_fft
self.n_mels = n_mels
self.lowfreq = lowfreq
self.highfreq = highfreq
self.window = window
self.win_length = win_length or self.n_fft
self.hop_length = hop_length
self.hop_len = self.hop_length or self.n_fft // 4
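        # Precompute the mel filterbank that projects linear spectrograms onto the mel scale.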
self.fb = torch.tensor(
librosa.filters.mel(
self.sample_rate, self.n_fft, n_mels=self.n_mels, fmin=self.lowfreq, fmax=self.highfreq
),
dtype=torch.float,
).unsqueeze(0)
window_fn = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}.get(self.window, None)
self.stft = lambda x: torch.stft(
input=x,
n_fft=self.n_fft,
hop_length=self.hop_len,
win_length=self.win_length,
window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None,
)
for data_type in self.sup_data_types:
if data_type not in VALID_SUPPLEMENTARY_DATA_TYPES:
raise NotImplementedError(f"Current implementation of TTSDataset doesn't support {data_type} type.")
getattr(self, f"add_{data_type.name}")(**kwargs)
def add_log_mel(self, **kwargs):
pass
def add_durations(self, **kwargs):
durs_file = kwargs.pop('durs_file')
durs_type = kwargs.pop('durs_type')
audio_stem2durs = torch.load(durs_file)
self.durs = []
for tag in [Path(d["audio_filepath"]).stem for d in self.data]:
durs = audio_stem2durs[tag]
if durs_type == "aligner-based":
self.durs.append(durs)
else:
raise NotImplementedError(
f"{durs_type} duration type is not supported. Only align-based is supported at this moment."
)
def add_duration_prior(self, **kwargs):
self.use_beta_binomial_interpolator = kwargs.pop('use_beta_binomial_interpolator', False)
if self.use_beta_binomial_interpolator:
self.beta_binomial_interpolator = BetaBinomialInterpolator()
def add_pitch(self, **kwargs):
self.pitch_fmin = kwargs.pop("pitch_fmin", librosa.note_to_hz('C2'))
self.pitch_fmax = kwargs.pop("pitch_fmax", librosa.note_to_hz('C7'))
self.pitch_avg = kwargs.pop("pitch_avg", None)
self.pitch_std = kwargs.pop("pitch_std", None)
self.pitch_norm = kwargs.pop("pitch_norm", False)
def add_energy(self, **kwargs):
pass
def add_speaker_id(self, **kwargs):
pass
def get_spec(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.stft(audio)
if spec.dtype in [torch.cfloat, torch.cdouble]:
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9)
return spec
def get_log_mel(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.get_spec(audio)
mel = torch.matmul(self.fb.to(spec.dtype), spec)
log_mel = torch.log(torch.clamp(mel, min=torch.finfo(mel.dtype).tiny))
return log_mel
def __getitem__(self, index):
sample = self.data[index]
audio_stem = Path(sample["audio_filepath"]).stem
features = self.featurizer.process(sample["audio_filepath"], trim=self.trim)
audio, audio_length = features, torch.tensor(features.shape[0]).long()
text = torch.tensor(sample["text_tokens"]).long()
text_length = torch.tensor(len(sample["text_tokens"])).long()
log_mel, log_mel_length = None, None
if LogMel in self.sup_data_types_set:
mel_path = sample["mel_filepath"]
if mel_path is not None and Path(mel_path).exists():
log_mel = torch.load(mel_path)
else:
mel_path = Path(self.sup_data_path) / f"mel_{audio_stem}.pt"
if mel_path.exists():
log_mel = torch.load(mel_path)
else:
log_mel = self.get_log_mel(audio)
torch.save(log_mel, mel_path)
log_mel = log_mel.squeeze(0)
log_mel_length = torch.tensor(log_mel.shape[1]).long()
durations = None
if Durations in self.sup_data_types_set:
durations = self.durs[index]
duration_prior = None
if DurationPrior in self.sup_data_types_set:
if self.use_beta_binomial_interpolator:
mel_len = self.get_log_mel(audio).shape[2]
duration_prior = torch.from_numpy(self.beta_binomial_interpolator(mel_len, text_length.item()))
else:
prior_path = Path(self.sup_data_path) / f"pr_{audio_stem}.pt"
if prior_path.exists():
duration_prior = torch.load(prior_path)
else:
mel_len = self.get_log_mel(audio).shape[2]
duration_prior = beta_binomial_prior_distribution(text_length, mel_len)
duration_prior = torch.from_numpy(duration_prior)
torch.save(duration_prior, prior_path)
pitch, pitch_length = None, None
if Pitch in self.sup_data_types_set:
pitch_name = (
f"{audio_stem}_pitch_pyin_"
f"fmin{self.pitch_fmin}_fmax{self.pitch_fmax}_"
f"fl{self.win_length}_hs{self.hop_len}.pt"
)
pitch_path = Path(self.sup_data_path) / pitch_name
if pitch_path.exists():
pitch = torch.load(pitch_path).float()
else:
pitch, _, _ = librosa.pyin(
audio.numpy(),
fmin=self.pitch_fmin,
fmax=self.pitch_fmax,
frame_length=self.win_length,
sr=self.sample_rate,
fill_na=0.0,
)
pitch = torch.from_numpy(pitch).float()
torch.save(pitch, pitch_path)
if self.pitch_avg is not None and self.pitch_std is not None and self.pitch_norm:
pitch -= self.pitch_avg
                pitch[pitch == -self.pitch_avg] = 0.0  # Zero out values that were previously zero
pitch /= self.pitch_std
pitch_length = torch.tensor(len(pitch)).long()
energy, energy_length = None, None
if Energy in self.sup_data_types_set:
energy_path = Path(self.sup_data_path) / f"{audio_stem}_energy_wl{self.win_length}_hs{self.hop_len}.pt"
if energy_path.exists():
energy = torch.load(energy_path).float()
else:
spec = self.get_spec(audio)
energy = torch.linalg.norm(spec.squeeze(0), axis=0).float()
torch.save(energy, energy_path)
energy_length = torch.tensor(len(energy)).long()
speaker_id = None
if SpeakerID in self.sup_data_types_set:
speaker_id = torch.tensor(sample["speaker_id"]).long()
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
)
def __len__(self):
return len(self.data)
def join_data(self, data_dict):
result = []
for data_type in MAIN_DATA_TYPES + self.sup_data_types:
result.append(data_dict[data_type.name])
if issubclass(data_type, WithLens):
result.append(data_dict[f"{data_type.name}_lens"])
return tuple(result)
def general_collate_fn(self, batch):
(
_,
audio_lengths,
_,
tokens_lengths,
_,
log_mel_lengths,
durations_list,
duration_priors_list,
pitches,
pitches_lengths,
energies,
energies_lengths,
_,
) = zip(*batch)
max_audio_len = max(audio_lengths).item()
max_tokens_len = max(tokens_lengths).item()
max_log_mel_len = max(log_mel_lengths) if LogMel in self.sup_data_types_set else None
max_durations_len = max([len(i) for i in durations_list]) if Durations in self.sup_data_types_set else None
max_pitches_len = max(pitches_lengths).item() if Pitch in self.sup_data_types_set else None
max_energies_len = max(energies_lengths).item() if Energy in self.sup_data_types_set else None
if LogMel in self.sup_data_types_set:
log_mel_pad = torch.finfo(batch[0][2].dtype).tiny
duration_priors = (
torch.zeros(
len(duration_priors_list),
max([prior_i.shape[0] for prior_i in duration_priors_list]),
max([prior_i.shape[1] for prior_i in duration_priors_list]),
)
if DurationPrior in self.sup_data_types_set
else []
)
audios, tokens, log_mels, durations_list, pitches, energies, speaker_ids = [], [], [], [], [], [], []
for i, sample_tuple in enumerate(batch):
(
audio,
audio_len,
token,
token_len,
log_mel,
log_mel_len,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
) = sample_tuple
audio = general_padding(audio, audio_len.item(), max_audio_len)
audios.append(audio)
token = general_padding(token, token_len.item(), max_tokens_len, pad_value=self.text_tokenizer_pad_id)
tokens.append(token)
if LogMel in self.sup_data_types_set:
log_mels.append(general_padding(log_mel, log_mel_len, max_log_mel_len, pad_value=log_mel_pad))
if Durations in self.sup_data_types_set:
durations_list.append(general_padding(durations, len(durations), max_durations_len))
if DurationPrior in self.sup_data_types_set:
duration_priors[i, : duration_prior.shape[0], : duration_prior.shape[1]] = duration_prior
if Pitch in self.sup_data_types_set:
pitches.append(general_padding(pitch, pitch_length.item(), max_pitches_len))
if Energy in self.sup_data_types_set:
energies.append(general_padding(energy, energy_length.item(), max_energies_len))
if SpeakerID in self.sup_data_types_set:
speaker_ids.append(speaker_id)
data_dict = {
"audio": torch.stack(audios),
"audio_lens": torch.stack(audio_lengths),
"text": torch.stack(tokens),
"text_lens": torch.stack(tokens_lengths),
"log_mel": torch.stack(log_mels) if LogMel in self.sup_data_types_set else None,
"log_mel_lens": torch.stack(log_mel_lengths) if LogMel in self.sup_data_types_set else None,
"durations": torch.stack(durations_list) if Durations in self.sup_data_types_set else None,
"duration_prior": duration_priors if DurationPrior in self.sup_data_types_set else None,
"pitch": torch.stack(pitches) if Pitch in self.sup_data_types_set else None,
"pitch_lens": torch.stack(pitches_lengths) if Pitch in self.sup_data_types_set else None,
"energy": torch.stack(energies) if Energy in self.sup_data_types_set else None,
"energy_lens": torch.stack(energies_lengths) if Energy in self.sup_data_types_set else None,
"speaker_id": torch.stack(speaker_ids) if SpeakerID in self.sup_data_types_set else None,
}
return data_dict
def _collate_fn(self, batch):
data_dict = self.general_collate_fn(batch)
joined_data = self.join_data(data_dict)
return joined_data
class MixerTTSDataset(TTSDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _albert(self):
from transformers import AlbertTokenizer # noqa pylint: disable=import-outside-toplevel
self.lm_model_tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
self.lm_padding_value = self.lm_model_tokenizer._convert_token_to_id('<pad>')
space_value = self.lm_model_tokenizer._convert_token_to_id('▁')
self.id2lm_tokens = {}
for i, d in enumerate(self.data):
raw_text = d["raw_text"]
assert isinstance(self.text_tokenizer, EnglishPhonemesTokenizer) or isinstance(
self.text_tokenizer, EnglishCharsTokenizer
)
preprocess_text_as_tts_input = self.text_tokenizer.text_preprocessing_func(raw_text)
lm_tokens_as_ids = self.lm_model_tokenizer.encode(preprocess_text_as_tts_input, add_special_tokens=False)
if self.text_tokenizer.pad_with_space:
lm_tokens_as_ids = [space_value] + lm_tokens_as_ids + [space_value]
self.id2lm_tokens[i] = lm_tokens_as_ids
def add_lm_tokens(self, **kwargs):
lm_model = kwargs.pop('lm_model')
if lm_model == "albert":
self._albert()
else:
raise NotImplementedError(
f"{lm_model} lm model is not supported. Only albert is supported at this moment."
)
def __getitem__(self, index):
(
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
) = super().__getitem__(index)
lm_tokens = None
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.tensor(self.id2lm_tokens[index]).long()
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
lm_tokens,
)
def _collate_fn(self, batch):
batch = list(zip(*batch))
data_dict = self.general_collate_fn(list(zip(*batch[:13])))
lm_tokens_list = batch[13]
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.full(
(len(lm_tokens_list), max([lm_tokens.shape[0] for lm_tokens in lm_tokens_list])),
fill_value=self.lm_padding_value,
)
for i, lm_tokens_i in enumerate(lm_tokens_list):
lm_tokens[i, : lm_tokens_i.shape[0]] = lm_tokens_i
data_dict[LMTokens.name] = lm_tokens
joined_data = self.join_data(data_dict)
return joined_data
| [
"nemo.collections.tts.torch.helpers.general_padding",
"torch.from_numpy",
"transformers.AlbertTokenizer.from_pretrained",
"nemo.utils.logging.info",
"pathlib.Path",
"torch.cuda.amp.autocast",
"torch.view_as_real",
"torch.finfo",
"json.loads",
"pickle.load",
"librosa.filters.mel",
"torch.save",
"librosa.note_to_hz",
"nemo.collections.asr.parts.preprocessing.features.WaveformFeaturizer",
"torch.load",
"torch.stack",
"tqdm.tqdm",
"torch.tensor",
"nemo.collections.tts.torch.helpers.BetaBinomialInterpolator",
"nemo.collections.tts.torch.helpers.beta_binomial_prior_distribution"
]
| [((11774, 11822), 'nemo.collections.asr.parts.preprocessing.features.WaveformFeaturizer', 'WaveformFeaturizer', ([], {'sample_rate': 'self.sample_rate'}), '(sample_rate=self.sample_rate)\n', (11792, 11822), False, 'from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer\n'), ((13421, 13442), 'torch.load', 'torch.load', (['durs_file'], {}), '(durs_file)\n', (13431, 13442), False, 'import torch\n'), ((24412, 24461), 'transformers.AlbertTokenizer.from_pretrained', 'AlbertTokenizer.from_pretrained', (['"""albert-base-v2"""'], {}), "('albert-base-v2')\n", (24443, 24461), False, 'from transformers import AlbertTokenizer\n'), ((10207, 10275), 'nemo.utils.logging.info', 'logging.info', (['f"""Dataset contains {total_duration / 3600:.2f} hours."""'], {}), "(f'Dataset contains {total_duration / 3600:.2f} hours.')\n", (10219, 10275), False, 'from nemo.utils import logging\n'), ((10313, 10367), 'nemo.utils.logging.info', 'logging.info', (['f"""using {ignore_file} to prune dataset."""'], {}), "(f'using {ignore_file} to prune dataset.')\n", (10325, 10367), False, 'from nemo.utils import logging\n'), ((11520, 11667), 'nemo.utils.logging.info', 'logging.info', (['f"""Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains {(total_duration - pruned_duration) / 3600:.2f} hours."""'], {}), "(\n f'Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains {(total_duration - pruned_duration) / 3600:.2f} hours.'\n )\n", (11532, 11667), False, 'from nemo.utils import logging\n'), ((14093, 14119), 'nemo.collections.tts.torch.helpers.BetaBinomialInterpolator', 'BetaBinomialInterpolator', ([], {}), '()\n', (14117, 14119), False, 'from nemo.collections.tts.torch.helpers import BetaBinomialInterpolator, beta_binomial_prior_distribution, general_padding\n'), ((14207, 14231), 'librosa.note_to_hz', 'librosa.note_to_hz', (['"""C2"""'], {}), "('C2')\n", (14225, 14231), False, 'import librosa\n'), ((14284, 14308), 'librosa.note_to_hz', 'librosa.note_to_hz', (['"""C7"""'], {}), "('C7')\n", (14302, 14308), False, 'import librosa\n'), ((14627, 14665), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': '(False)'}), '(enabled=False)\n', (14650, 14665), False, 'import torch\n'), ((14937, 14975), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': '(False)'}), '(enabled=False)\n', (14960, 14975), False, 'import torch\n'), ((15274, 15304), 'pathlib.Path', 'Path', (["sample['audio_filepath']"], {}), "(sample['audio_filepath'])\n", (15278, 15304), False, 'from pathlib import Path\n'), ((22890, 22909), 'torch.stack', 'torch.stack', (['audios'], {}), '(audios)\n', (22901, 22909), False, 'import torch\n'), ((22937, 22963), 'torch.stack', 'torch.stack', (['audio_lengths'], {}), '(audio_lengths)\n', (22948, 22963), False, 'import torch\n'), ((22985, 23004), 'torch.stack', 'torch.stack', (['tokens'], {}), '(tokens)\n', (22996, 23004), False, 'import torch\n'), ((23031, 23058), 'torch.stack', 'torch.stack', (['tokens_lengths'], {}), '(tokens_lengths)\n', (23042, 23058), False, 'import torch\n'), ((8635, 8689), 'nemo.utils.logging.info', 'logging.info', (['f"""Loading dataset from {manifest_file}."""'], {}), "(f'Loading dataset from {manifest_file}.')\n", (8647, 8689), False, 'from nemo.utils import logging\n'), ((8718, 8725), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (8722, 8725), False, 'from tqdm import tqdm\n'), ((10685, 10701), 'pathlib.Path', 'Path', (['audio_path'], {}), '(audio_path)\n', (10689, 10701), False, 'from pathlib import Path\n'), ((13487, 
13512), 'pathlib.Path', 'Path', (["d['audio_filepath']"], {}), "(d['audio_filepath'])\n", (13491, 13512), False, 'from pathlib import Path\n'), ((14786, 14810), 'torch.view_as_real', 'torch.view_as_real', (['spec'], {}), '(spec)\n', (14804, 14810), False, 'import torch\n'), ((15491, 15526), 'torch.tensor', 'torch.tensor', (["sample['text_tokens']"], {}), "(sample['text_tokens'])\n", (15503, 15526), False, 'import torch\n'), ((15834, 15854), 'torch.load', 'torch.load', (['mel_path'], {}), '(mel_path)\n', (15844, 15854), False, 'import torch\n'), ((17522, 17546), 'pathlib.Path', 'Path', (['self.sup_data_path'], {}), '(self.sup_data_path)\n', (17526, 17546), False, 'from pathlib import Path\n'), ((18046, 18075), 'torch.save', 'torch.save', (['pitch', 'pitch_path'], {}), '(pitch, pitch_path)\n', (18056, 18075), False, 'import torch\n'), ((18526, 18550), 'pathlib.Path', 'Path', (['self.sup_data_path'], {}), '(self.sup_data_path)\n', (18530, 18550), False, 'from pathlib import Path\n'), ((18864, 18895), 'torch.save', 'torch.save', (['energy', 'energy_path'], {}), '(energy, energy_path)\n', (18874, 18895), False, 'import torch\n'), ((20780, 20810), 'torch.finfo', 'torch.finfo', (['batch[0][2].dtype'], {}), '(batch[0][2].dtype)\n', (20791, 20810), False, 'import torch\n'), ((23083, 23104), 'torch.stack', 'torch.stack', (['log_mels'], {}), '(log_mels)\n', (23094, 23104), False, 'import torch\n'), ((23181, 23209), 'torch.stack', 'torch.stack', (['log_mel_lengths'], {}), '(log_mel_lengths)\n', (23192, 23209), False, 'import torch\n'), ((23283, 23310), 'torch.stack', 'torch.stack', (['durations_list'], {}), '(durations_list)\n', (23294, 23310), False, 'import torch\n'), ((23484, 23504), 'torch.stack', 'torch.stack', (['pitches'], {}), '(pitches)\n', (23495, 23504), False, 'import torch\n'), ((23578, 23606), 'torch.stack', 'torch.stack', (['pitches_lengths'], {}), '(pitches_lengths)\n', (23589, 23606), False, 'import torch\n'), ((23676, 23697), 'torch.stack', 'torch.stack', (['energies'], {}), '(energies)\n', (23687, 23697), False, 'import torch\n'), ((23773, 23802), 'torch.stack', 'torch.stack', (['energies_lengths'], {}), '(energies_lengths)\n', (23784, 23802), False, 'import torch\n'), ((23877, 23901), 'torch.stack', 'torch.stack', (['speaker_ids'], {}), '(speaker_ids)\n', (23888, 23901), False, 'import torch\n'), ((8106, 8125), 'pathlib.Path', 'Path', (['sup_data_path'], {}), '(sup_data_path)\n', (8110, 8125), False, 'from pathlib import Path\n'), ((8754, 8770), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (8764, 8770), False, 'import json\n'), ((10471, 10485), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10482, 10485), False, 'import pickle\n'), ((12188, 12301), 'librosa.filters.mel', 'librosa.filters.mel', (['self.sample_rate', 'self.n_fft'], {'n_mels': 'self.n_mels', 'fmin': 'self.lowfreq', 'fmax': 'self.highfreq'}), '(self.sample_rate, self.n_fft, n_mels=self.n_mels, fmin=\n self.lowfreq, fmax=self.highfreq)\n', (12207, 12301), False, 'import librosa\n'), ((15436, 15467), 'torch.tensor', 'torch.tensor', (['features.shape[0]'], {}), '(features.shape[0])\n', (15448, 15467), False, 'import torch\n'), ((15900, 15924), 'pathlib.Path', 'Path', (['self.sup_data_path'], {}), '(self.sup_data_path)\n', (15904, 15924), False, 'from pathlib import Path\n'), ((16019, 16039), 'torch.load', 'torch.load', (['mel_path'], {}), '(mel_path)\n', (16029, 16039), False, 'import torch\n'), ((16136, 16165), 'torch.save', 'torch.save', (['log_mel', 'mel_path'], {}), '(log_mel, mel_path)\n', (16146, 
16165), False, 'import torch\n'), ((16237, 16267), 'torch.tensor', 'torch.tensor', (['log_mel.shape[1]'], {}), '(log_mel.shape[1])\n', (16249, 16267), False, 'import torch\n'), ((16745, 16769), 'pathlib.Path', 'Path', (['self.sup_data_path'], {}), '(self.sup_data_path)\n', (16749, 16769), False, 'from pathlib import Path\n'), ((16872, 16894), 'torch.load', 'torch.load', (['prior_path'], {}), '(prior_path)\n', (16882, 16894), False, 'import torch\n'), ((17017, 17071), 'nemo.collections.tts.torch.helpers.beta_binomial_prior_distribution', 'beta_binomial_prior_distribution', (['text_length', 'mel_len'], {}), '(text_length, mel_len)\n', (17049, 17071), False, 'from nemo.collections.tts.torch.helpers import BetaBinomialInterpolator, beta_binomial_prior_distribution, general_padding\n'), ((17109, 17141), 'torch.from_numpy', 'torch.from_numpy', (['duration_prior'], {}), '(duration_prior)\n', (17125, 17141), False, 'import torch\n'), ((17162, 17200), 'torch.save', 'torch.save', (['duration_prior', 'prior_path'], {}), '(duration_prior, prior_path)\n', (17172, 17200), False, 'import torch\n'), ((19059, 19093), 'torch.tensor', 'torch.tensor', (["sample['speaker_id']"], {}), "(sample['speaker_id'])\n", (19071, 19093), False, 'import torch\n'), ((22061, 22138), 'nemo.collections.tts.torch.helpers.general_padding', 'general_padding', (['log_mel', 'log_mel_len', 'max_log_mel_len'], {'pad_value': 'log_mel_pad'}), '(log_mel, log_mel_len, max_log_mel_len, pad_value=log_mel_pad)\n', (22076, 22138), False, 'from nemo.collections.tts.torch.helpers import BetaBinomialInterpolator, beta_binomial_prior_distribution, general_padding\n'), ((26096, 26134), 'torch.tensor', 'torch.tensor', (['self.id2lm_tokens[index]'], {}), '(self.id2lm_tokens[index])\n', (26108, 26134), False, 'import torch\n'), ((9775, 9882), 'nemo.utils.logging.info', 'logging.info', (['"""Not all audio files have duration information. Duration logging will be disabled."""'], {}), "(\n 'Not all audio files have duration information. Duration logging will be disabled.'\n )\n", (9787, 9882), False, 'from nemo.utils import logging\n'), ((15783, 15797), 'pathlib.Path', 'Path', (['mel_path'], {}), '(mel_path)\n', (15787, 15797), False, 'from pathlib import Path\n'), ((17620, 17642), 'torch.load', 'torch.load', (['pitch_path'], {}), '(pitch_path)\n', (17630, 17642), False, 'import torch\n'), ((17998, 18021), 'torch.from_numpy', 'torch.from_numpy', (['pitch'], {}), '(pitch)\n', (18014, 18021), False, 'import torch\n'), ((18678, 18701), 'torch.load', 'torch.load', (['energy_path'], {}), '(energy_path)\n', (18688, 18701), False, 'import torch\n'), ((8574, 8593), 'pathlib.Path', 'Path', (['manifest_file'], {}), '(manifest_file)\n', (8578, 8593), False, 'from pathlib import Path\n'), ((10390, 10407), 'pathlib.Path', 'Path', (['ignore_file'], {}), '(ignore_file)\n', (10394, 10407), False, 'from pathlib import Path\n'), ((15131, 15153), 'torch.finfo', 'torch.finfo', (['mel.dtype'], {}), '(mel.dtype)\n', (15142, 15153), False, 'import torch\n')] |
from flask import Flask, request, jsonify
from flask_cors import CORS
from run import run_ansys
from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check
ansys_processing_count = 0
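# Incremented on every /run_simu request and exposed to the validators via
# ctx["process"], presumably so ansys_overload_check can compare it against the limit.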
# debug
# import ipdb; ipdb.set_trace()
app = Flask(__name__)
CORS(app) # local development cors
@app.route('/run_simu', methods=["POST"])
def run_simulation():
global ansys_processing_count
ansys_processing_count += 1
ctx = {
"request": request.get_json(),
"allow_run": True,
"process": {
"limit": 4,
"count": ansys_processing_count,
},
"start_run_response": {"msg": "start run at background"},
"error": {
"validate": {"msg": ""}
}
}
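    # Run the simulation only when every validator passes; each check receives the
    # shared ctx and is expected to fill ctx["error"]["validate"]["msg"] on failure.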
if spec_present(ctx) and \
data_type_validate(ctx) and \
spec_keys_validate(ctx) and \
ansys_overload_check(ctx):
        ctx = run_ansys(ctx)
else:
return jsonify(ctx["error"]["validate"])
return jsonify(ctx["response"])
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
| [
"flask_cors.CORS",
"flask.Flask",
"api.validate.spec_keys_validate",
"flask.request.get_json",
"api.validate.ansys_overload_check",
"api.validate.data_type_validate",
"api.validate.spec_present",
"run.run_ansys",
"flask.jsonify"
]
| [((271, 286), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (276, 286), False, 'from flask import Flask, request, jsonify\n'), ((287, 296), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (291, 296), False, 'from flask_cors import CORS\n'), ((1035, 1059), 'flask.jsonify', 'jsonify', (["ctx['response']"], {}), "(ctx['response'])\n", (1042, 1059), False, 'from flask import Flask, request, jsonify\n'), ((486, 504), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (502, 504), False, 'from flask import Flask, request, jsonify\n'), ((783, 800), 'api.validate.spec_present', 'spec_present', (['ctx'], {}), '(ctx)\n', (795, 800), False, 'from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check\n'), ((819, 842), 'api.validate.data_type_validate', 'data_type_validate', (['ctx'], {}), '(ctx)\n', (837, 842), False, 'from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check\n'), ((861, 884), 'api.validate.spec_keys_validate', 'spec_keys_validate', (['ctx'], {}), '(ctx)\n', (879, 884), False, 'from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check\n'), ((903, 928), 'api.validate.ansys_overload_check', 'ansys_overload_check', (['ctx'], {}), '(ctx)\n', (923, 928), False, 'from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check\n'), ((944, 963), 'run.run_ansys', 'run_ansys', (['self.ctx'], {}), '(self.ctx)\n', (953, 963), False, 'from run import run_ansys\n'), ((989, 1022), 'flask.jsonify', 'jsonify', (["ctx['error']['validate']"], {}), "(ctx['error']['validate'])\n", (996, 1022), False, 'from flask import Flask, request, jsonify\n')] |
from django.db import models
class Category(models.Model):
title = models.CharField(max_length=20)
class Meta:
db_table = 'category'
verbose_name = ("Category")
verbose_name_plural = ("Categories")
def __str__(self):
return self.title
| [
"django.db.models.CharField"
]
| [((77, 108), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (93, 108), False, 'from django.db import models\n')] |