|
{
"source": "jell0213/MUNIT_DataHiding",
"score": 3
}
|
#### File: MUNIT_DataHiding/方法一/mod3嵌密.py
```python
from skimage import io,color,img_as_ubyte
import random
import os
from 畫直方圖 import PH
random.seed(100)
def Embed_mod(test,num,directory,mod_num):
for i in range(num):
        if not os.path.exists(test+"{:08d}".format(i)) : # create the output folder for this image
os.mkdir(test+"{:08d}".format(i))
image = io.imread(directory+'\\'+test+"{:08d}.png".format(i))
io.imsave(os.path.join(test+"{:08d}".format(i),test+'{:08d}.png'.format(i)),image)
location_map=[]
        f = open(os.path.join(test+"{:08d}".format(i),test+'{:08d}_location map.txt'.format(i)),'w') # record the location map as a .txt file
        for col in range(image.shape[0]): # scan the original image and build the location map (0: embeddable, 1: not embeddable); pure white (255,255,255) pixels cannot carry data
location_map.append([])
for row in range(image.shape[1]):
location_map[col].append([])
if image[col,row,0] == 255 and image[col, row,1] == 255 and image[col,row,2] == 255:
location_map[col][row]= 1
else :
location_map[col][row]= 0
f.write(str(location_map[col][row]))
f.write('\n')
f.close()
        image_lc = io.imread(directory+'\\'+test+"{:08d}.png".format(i)) # image marking embeddable pixels (red: embeddable, white: not embeddable)
for col in range(image_lc.shape[0]):
for row in range(image_lc.shape[1]):
if location_map[col][row] == 0 :
image_lc[col,row] = [255,0,0]
else :
image_lc[col,row] = [255,255,255]
io.imsave(os.path.join(test+"{:08d}".format(i),test+'{:08d}_lc.png'.format(i)),image_lc)
        image_embed = io.imread(directory+'\\'+test+"{:08d}.png".format(i)) # embed the secret data
f = open(os.path.join(test+"{:08d}".format(i),test+'{:08d}_code.txt'.format(i)),'w')
for col in range(image_embed.shape[0]):
for row in range(image_embed.shape[1]):
                if image_lc[col,row,0] == 255 and image_lc[col,row,1] == 0 and image_lc[col,row,2] == 0: # embed only in embeddable (red) pixels
image_embed[col,row,0],code=Mod(image_embed[col,row,0],mod_num)
f.write(str(code))
image_embed[col,row,1],code=Mod(image_embed[col,row,1],mod_num)
f.write(str(code))
image_embed[col,row,2],code=Mod(image_embed[col,row,2],mod_num)
f.write(str(code))
f.close()
io.imsave(os.path.join(test+"{:08d}".format(i),test+'{:08d}_embed.png'.format(i)),image_embed)
PH(os.path.join(test+"{:08d}".format(i),test+'{:08d}.png'.format(i)) , os.path.join(test+"{:08d}".format(i),test+'{:08d}_embed.png'.format(i)) , os.path.join(test+"{:08d}".format(i),test+'{:08d}直方圖.xlsx'.format(i)) )
def Mod(pixel,mod_num): # compute the mod-based embedding result for one channel value
    s=random.randint(0,mod_num-1) # secret digit in [0, mod_num-1]; mod_num itself is not validated
    p=pixel-(pixel%mod_num) # drop the remainder
    if p > 255-mod_num : # avoid overflow above 255
        p = p - mod_num
    p2=p+s # add the secret digit
    p3=[] # generate three candidates and keep the one closest to the original pixel
    p3.append(p2) # candidates: p2, p2 shifted down, p2 shifted up (each by a multiple of mod_num)
    if p2 > mod_num-1 : # avoid going below 0
p3.append(p2-mod_num)
else :
p3.append(p2+(mod_num*2))
    if p2 < 255-mod_num+1 : # avoid exceeding 255
p3.append(p2+mod_num)
else :
p3.append(p2-(mod_num*2))
if abs(p3[0]-pixel)<=abs(p3[1]-pixel) and abs(p3[0]-pixel)<=abs(p3[2]-pixel) :
return p3[0],s
elif abs(p3[1]-pixel)<=abs(p3[0]-pixel) and abs(p3[1]-pixel)<=abs(p3[2]-pixel) :
return p3[1],s
else :
return p3[2],s
Embed_mod('output',1,'256鞋20個',5) # arguments: file prefix, number of images, input folder, mod value
```
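For reference, the secret digits written by `Mod` can be read back by taking each embedded channel value modulo `mod_num`, since every adjustment in `Mod` is a multiple of `mod_num`. Below is a minimal extraction sketch; it is not part of the original repository, and it assumes the `_location map.txt` format written by `Embed_mod` above and the same `mod_num`.
```python
# Hypothetical extraction sketch for the embedder above.
from skimage import io

def extract_mod(stego_path, location_map_path, mod_num):
    image = io.imread(stego_path)
    with open(location_map_path) as f:
        location_map = [line.rstrip('\n') for line in f]  # one line per image row, '0'/'1' per pixel
    digits = []
    for col in range(image.shape[0]):
        for row in range(image.shape[1]):
            if location_map[col][row] == '1':
                continue  # this pixel was skipped during embedding
            for ch in range(3):
                # Mod() only shifts each channel by multiples of mod_num plus the secret
                # digit, so the digit is simply the stego value modulo mod_num
                digits.append(int(image[col, row, ch]) % mod_num)
    return digits
```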
#### File: MUNIT_DataHiding/方法一/MUNIT算capacity(加速).py
```python
from skimage import io
from openpyxl import Workbook
import openpyxl
import os
import math
import time
def cal_capacity(folder_name,
num_image,
num_mod,
embed_ratio):
wb = Workbook()
ws = wb.active
ws.append(["embed_mod3","mod="+str(num_mod),str(embed_ratio)+"%","256*256"])
ws.append(["","","文字檔","","","","","","","圖片檔"])
ws.append(["檔名","嵌密量","大小(bit)","壓縮大小","壓縮率","嵌密壓縮率","淨藏量","rare bpp","bpp","大小(bit)","壓縮大小","壓縮率","嵌密壓縮率","淨藏量","pure bpp","bpp"])
wb.save("./embed_mod3/embed_mod3_capacity.xlsx")
wb = openpyxl.load_workbook("./embed_mod3/embed_mod3_capacity.xlsx")
ws = wb['Sheet']
    a=[] # running totals used to compute the per-column averages
for i in range(20):
a.append(0)
for i in range(num_image):
        f_lc= open("./embed_mod3/output{:08d}".format(i)+"/output{:08d}_location map.txt".format(i),'r') # open the location map .txt to compute the capacity
image_lc = io.imread("./embed_mod3/output{:08d}".format(i)+"/output{:08d}_lc.png".format(i))
count = 0
        for row in range(image_lc.shape[0]): # compute capacity: non-white pixels are embeddable, three channels, log2(mod) bits per channel
            for col in range(image_lc.shape[1]):
                bit=f_lc.read(1)
                while bit == '\n': # the map stores one line per image row; skip newline characters so every pixel is counted
                    bit=f_lc.read(1)
                if bit== "0" or bit== "2" :
                    count+=1
        count*=3*math.log(num_mod,2)*embed_ratio/100 # capacity in bits
        size_lc=os.path.getsize("./embed_mod3/output{:08d}".format(i)+"/output{:08d}_location map.txt".format(i))*8 # location-map .txt size (bits)
        size_image_lc=os.path.getsize("./embed_mod3/output{:08d}".format(i)+"/output{:08d}_lc.png".format(i))*8 # location-map .png size (bits)
        size_lc_gz=os.path.getsize("./embed_mod3/output{:08d}".format(i)+"/output{:08d}_location map.tar.gz".format(i))*8 # compressed .txt size (bits)
        size_image_lc_gz=os.path.getsize("./embed_mod3/output{:08d}".format(i)+"/output{:08d}_lc.tar.gz".format(i))*8 # compressed .png size (bits)
        compress_lc=(size_lc_gz/size_lc)*100 # .txt compression ratio (%)
        compress_image_lc=(size_image_lc_gz/size_image_lc)*100 # .png compression ratio (%)
        compress_code_lc=(size_lc_gz/count)*100 # compressed .txt size relative to capacity (%)
        compress_code_image_lc=(size_image_lc_gz/count)*100 # compressed .png size relative to capacity (%)
        net_capacity_lc=count-size_lc_gz # net capacity after subtracting the compressed .txt map
        net_capacity_image_lc=count-size_image_lc_gz # net capacity after subtracting the compressed .png map
        net_embedding_ratio_lc=net_capacity_lc/(256*256) # net embedding rate with the .txt map (pure bpp)
        net_embedding_ratio_image_lc=net_capacity_image_lc/(256*256) # net embedding rate with the .png map (pure bpp)
        embedding_ratio_lc=count/(256*256) # embedding rate in bpp (same for .txt and .png)
ws.append(["output{:08d}".format(i),
                   float('%.2f'%round(count,2)), # round to the chosen number of decimals
float('%.2f'%round(size_lc,2)),
float('%.2f'%round(size_lc_gz,2)),
str(float('%.1f'%round(compress_lc,1)))+'%',
str(float('%.1f'%round(compress_code_lc,1)))+'%',
float('%.2f'%round(net_capacity_lc,2)),
float('%.2f'%round(net_embedding_ratio_lc,2)),
float('%.2f'%round(embedding_ratio_lc,2)),
float('%.2f'%round(size_image_lc,2)),
float('%.2f'%round(size_image_lc_gz,2)),
str(float('%.1f'%round(compress_image_lc,1)))+'%',
str(float('%.1f'%round(compress_code_image_lc,1)))+'%',
float('%.2f'%round(net_capacity_image_lc,2)),
float('%.2f'%round(net_embedding_ratio_image_lc,2)),
float('%.2f'%round(embedding_ratio_lc,2))])
a[0]+=count
a[1]+=size_lc
a[2]+=size_lc_gz
a[3]+=compress_lc
a[4]+=compress_code_lc
a[5]+=net_capacity_lc
a[6]+=net_embedding_ratio_lc
a[7]+=embedding_ratio_lc
a[8]+=size_image_lc
a[9]+=size_image_lc_gz
a[10]+=compress_image_lc
a[11]+=compress_code_image_lc
a[12]+=net_capacity_image_lc
a[13]+=net_embedding_ratio_image_lc
a[14]+=embedding_ratio_lc
f_lc.close()
for i in range(20):
a[i]/=num_image
ws.append(["檔名","嵌密量","大小(bit)","壓縮大小","壓縮率","嵌密壓縮率","淨藏量","pure bpp","bpp","大小(bit)","壓縮大小","壓縮率","嵌密壓縮率","淨藏量","pure bpp","bpp"])
ws.append([
"",
float('%.2f'%round(a[0],2)),
float('%.2f'%round(a[1],2)),
float('%.2f'%round(a[2],2)),
str(float('%.1f'%round(a[3],1)))+'%',
str(float('%.1f'%round(a[4],1)))+'%',
float('%.2f'%round(a[5],2)),
float('%.2f'%round(a[6],2)),
float('%.2f'%round(a[7],2)),
float('%.2f'%round(a[8],2)),
float('%.2f'%round(a[9],2)),
str(float('%.1f'%round(a[10],1)))+'%',
str(float('%.1f'%round(a[11],1)))+'%',
float('%.2f'%round(a[12],2)),
float('%.2f'%round(a[13],2)),
float('%.2f'%round(a[14],2)),
])
wb.save("./embed_mod3/embed_mod3_capacity.xlsx")#寫檔後存檔
embed_ratio=int(input("embedding ratio(%) = "))
tStart = time.time() # start timer
cal_capacity("embed_mod3",5000,3,embed_ratio)
tEnd = time.time() # stop timer
wb = openpyxl.load_workbook("./embed_mod3/embed_mod3_capacity.xlsx")
ws = wb['Sheet']
ws.append(["total time",str(round(tEnd-tStart,2))+" s"])
wb.save("./embed_mod3/embed_mod3_capacity.xlsx")#寫檔後存檔
print(round(tEnd-tStart,2))
```
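For orientation, the bookkeeping above reduces to one formula: capacity = (number of embeddable pixels) × 3 channels × log2(mod) × embed_ratio/100. A quick self-contained sanity check with made-up numbers (the pixel count below is illustrative, not taken from a real image):
```python
# Illustrative sanity check of the capacity formula used above.
import math

non_white_pixels = 40_000          # hypothetical count of embeddable pixels in a 256*256 image
num_mod, embed_ratio = 3, 100
capacity = non_white_pixels * 3 * math.log(num_mod, 2) * embed_ratio / 100
print(capacity, capacity / (256 * 256))  # about 1.9e5 bits, roughly 2.9 bpp
```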
#### File: MUNIT_DataHiding/方法二/無LM嵌密-100%.py
```python
from skimage import io,color,img_as_ubyte
import random
import os
from 畫直方圖 import PH
random.seed(100)
def Embed_mod(in_dir,out_dir,mod_num,num):
print("embed ratio = 100%") #嵌密率100%
if not os.path.exists(out_dir): #建立路徑(資料夾)
os.mkdir(out_dir)
if not os.path.exists(out_dir+"/_Cover") : #-----------建立路徑_Cover,用來進行IQA比較
os.mkdir(out_dir+"/_Cover")
if not os.path.exists(out_dir+"/_Stego") : #-----------建立路徑_Stego,用來進行IQA比較
os.mkdir(out_dir+"/_Stego")
for i in range(num):
if not os.path.exists(out_dir+"/output{:08d}".format(i)) : #-----------建立路徑(各圖片子資料夾)
os.mkdir(out_dir+"/output{:08d}".format(i))
image = io.imread(in_dir+"/output{:08d}.png".format(i))
io.imsave(out_dir+"/output{:08d}".format(i)+"/output{:08d}.png".format(i),image) #-----------從in_dir複製原圖到out_dir
io.imsave(out_dir+"/_Cover"+"/output{:08d}.png".format(i),image) #-----------從in_dir複製原圖到_Cover資料夾(用來進行IQA比較)
location_map=[]
f = open(out_dir+"/output{:08d}".format(i)+"/output{:08d}_lc.txt".format(i),'w') #-----------紀錄lc.txt
image_lc = io.imread(in_dir+"/output{:08d}.png".format(i)) #-----------記錄lc.png(png檔)(紅:有嵌密,白:沒嵌密)
for row in range(image.shape[0]): #輸入原圖,記錄lc.txt,(255,255,255)為白色不可嵌密處=1,其他地方設為紅色可嵌密處=0
location_map.append([])
for col in range(image.shape[1]):
location_map[row].append([])
                if image[row,col,0] == 255 and image[row, col,1] == 255 and image[row,col,2] == 255: # white region of the location map
                    location_map[row][col]= 1
                    image_lc[row,col] = [255,255,255]
                else : # all other (red, embeddable) regions
location_map[row][col]= 0
image_lc[row,col] = [255,0,0]
f.write(str(location_map[row][col]))
f.close()
io.imsave(out_dir+"/output{:08d}".format(i)+"/output{:08d}_lc.png".format(i),image_lc)
        image_embed = io.imread(in_dir+"/output{:08d}.png".format(i)) # embed the secret data
        f = open(out_dir+"/output{:08d}".format(i)+"/output{:08d}_code.txt".format(i),'w') # record the secret message
        pixel_num = 0 # pixel_num drives the embedding ratio (incremented only inside embeddable regions)
        embed_count = 0
        for row in range(image_embed.shape[0]):
            for col in range(image_embed.shape[1]):
                if location_map[row][col]!= 1 : # non-white pixels are embeddable
                    if (pixel_num%2) < 2 : # embedding-ratio check inherited from the parametrized version; (pixel_num%2) < 2 is always true, so every embeddable pixel is used (100%)
                        image_embed[row,col,0],code=Mod(image_embed[row,col,0],mod_num) # returns (stego value, secret digit)
f.write(str(code))
image_embed[row,col,1],code=Mod(image_embed[row,col,1],mod_num)
f.write(str(code))
image_embed[row,col,2],code=Mod(image_embed[row,col,2],mod_num)
f.write(str(code))
                        embed_count += 1 # embed_count records how many pixels actually carry data (for verification only)
pixel_num += 1
print(embed_count)
f.close()
        ######################################################################## check that no embeddable pixel turned white
        location_map2=[]
        f2 = open(out_dir+"/output{:08d}".format(i)+"/output{:08d}_lc2.txt".format(i),'w') #----------- record lc2.txt
        for row in range(image.shape[0]): # scan the stego image and record lc2.txt: white (255,255,255) = 1 (not embeddable), everything else = 0 (red, embeddable)
            location_map2.append([]) # initialize lc2[256][256]
for col in range(image.shape[1]):
location_map2[row].append([])
                if image_embed[row,col,0] == 255 and image_embed[row, col,1] == 255 and image_embed[row,col,2] == 255: # white region of lc2
                    location_map2[row][col]= 1
                else : # all other (red) regions of lc2
location_map2[row][col]= 0
f2.write(str(location_map2[row][col]))
        change_num=0 # change_num counts how many pixels had to be corrected
        for row in range(image_embed.shape[0]):
            for col in range(image_embed.shape[1]): # re-check the stego image
                if location_map[row][col] == 0 and location_map2[row][col] == 1: # compare the two maps; if an originally embeddable pixel turned white (255,255,255), correct it with B = B - mod_num
                    image_embed[row,col,2]=image_embed[row,col,2]-mod_num
                    change_num=change_num+1
        io.imsave(out_dir+"/output{:08d}".format(i)+"/output{:08d}_embed.png".format(i),image_embed) #------------ save the stego image
        io.imsave(out_dir+"/_Stego"+"/output{:08d}_embed.png".format(i),image_embed) #------------ save the stego image to the _Stego folder (for IQA comparison)
        PH(out_dir+"/output{:08d}".format(i)+"/output{:08d}.png".format(i) # plot the histogram via 畫直方圖.py
,out_dir+"/output{:08d}".format(i)+"/output{:08d}_embed.png".format(i)
,out_dir+"/output{:08d}".format(i)+"/output{:08d}直方圖.xlsx".format(i))
f2.close()
f3=open(out_dir+"/output{:08d}".format(i)+"/output{:08d}_change.txt".format(i),'w')
f3.write(str(change_num)+'\n')
for row in range(image_embed.shape[0]):
            for col in range(image_embed.shape[1]): # re-check the stego image
if location_map[row][col] == 0 and location_map2[row][col] == 1:
f3.write('1')
else :
f3.write('0')
f3.close()
def Mod(pixel,mod_num): # compute the mod-based embedding result for one channel value
    s=random.randint(0,mod_num-1) # secret digit in [0, mod_num-1]; mod_num itself is not validated
r=pixel%mod_num
d=((s-r)+mod_num)%mod_num
if d == 0:
pp=pixel
elif d < (mod_num/2) :
pp=pixel+d
else :
pp=pixel+d-mod_num
if pp<0 :
pp=pp+mod_num
elif pp > 255 :
pp=pp-mod_num
else:
pp=pp
return pp,s
Embed_mod('D:\\108resercher\\====######RESEARCH######====\\GAN-research\\10000','D:\\108resercher\\====######RESEARCH######====\\GAN-research\\12.8\\100%MOD3',3,5000)
```
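The white-pixel correction above is what allows extraction without a stored location map: in the stego image, white pixels are exactly the skipped ones, and every embedded channel value satisfies value % mod_num == secret digit. A minimal extraction sketch along those lines (not part of the original repository; names are illustrative):
```python
# Hypothetical extraction sketch for the no-location-map method above.
from skimage import io

def extract_no_lm(stego_path, mod_num):
    image = io.imread(stego_path)
    digits = []
    for row in range(image.shape[0]):
        for col in range(image.shape[1]):
            r, g, b = image[row, col, :3]
            if r == 255 and g == 255 and b == 255:
                continue  # white in the stego image means nothing was embedded here
            # each embedded channel was moved to the nearest value congruent to the secret digit
            digits.extend([int(r) % mod_num, int(g) % mod_num, int(b) % mod_num])
    return digits
```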
#### File: MUNIT_DataHiding/方法二/無LM算capacity.py
```python
from skimage import io
from openpyxl import Workbook
import openpyxl
import os
import math
import time
def cal_capacity(in_dir,
num_image,
num_mod,
embed_ratio):
wb = Workbook()
ws = wb.active
ws.append(["無LM","mod="+str(num_mod),str(embed_ratio)+"%","256*256"])
ws.append(["檔名","嵌密量","bpp"])
    a=[] # running totals used to compute the averages
for i in range(2):
a.append(0)
for i in range(num_image):
        f_code= open(in_dir+"/output{:08d}".format(i)+"/output{:08d}_code.txt".format(i),'r') # open code.txt (the recorded secret digits) to compute the capacity
        words = f_code.read()
        num_words = len(words)
        num_words*=math.log(num_mod,2) # capacity in bits
        bpp=num_words/(256*256) # embedding rate in bpp
ws.append(["output{:08d}".format(i),
                   float('%.2f'%round(num_words,2)), # round to the chosen number of decimals
float('%.2f'%round(bpp,2))])
a[0]+=num_words
a[1]+=bpp
if i % 250 == 0 :
print(i)
for i in range(2):
a[i]/=num_image
ws.append(["檔名","嵌密量","bpp"])
ws.append([
"",
float('%.2f'%round(a[0],2)),
float('%.2f'%round(a[1],2)),
])
wb.save(in_dir+"/NLM-mod{:d}_capacity".format(num_mod)+"({:d}%).xlsx".format(embed_ratio)) #寫檔後存檔
#---------------------------------------------------------------------------設定區
in_dir="D:\\108resercher\\====######RESEARCH######====\\GAN-research\\12.8\\無LM嵌密結果\\100%MOD3"
num_image = 5000
num_mod = 3
embed_ratio= 100
#--------------------------------------------------------------------------- configuration
tStart = time.time() # start timer
cal_capacity(in_dir,num_image,num_mod,embed_ratio) # run
tEnd = time.time() # stop timer
wb = openpyxl.load_workbook(in_dir+"/NLM-mod{:d}_capacity".format(num_mod)+"({:d}%).xlsx".format(embed_ratio))
ws = wb['Sheet']
ws.append(["total time",str(round(tEnd-tStart,2))+" s"])
wb.save(in_dir+"/NLM-mod{:d}_capacity".format(num_mod)+"({:d}%).xlsx".format(embed_ratio)) #寫檔後存檔
```
#### File: MUNIT_DataHiding/第二篇論文/縮小影像.py
```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# resizing 5000 images of 256*256 takes about 15 seconds
from PIL import Image
from os.path import join
from os import mkdir
import os
'''
def resize_small(path_input,path_output,image_number,image_type):
path_image = join(path_input,image_number+image_type)
img = Image.open(path_image)
new_img = img.resize((64, 64))
path_image2 = join(path_output,image_number+"_small.png")
new_img.save(path_image2)
path_input = 'C:/Users/li/Desktop/test'
path_output = 'C:/Users/li/Desktop/test/test2'
mkdir(path_output)
for i in range(3):
image_number="output{:08d}".format(i)
image_type="_embed.png"
resize_small(path_input,path_output,image_number,image_type)
'''
from os import listdir
from skimage import io
from os.path import join
def batch_resize(path_input,path_output,size):
    files = listdir(path_input) # list every file and subdirectory name
for image_name in files:
image_path = join(path_input, image_name)
image = Image.open(image_path)
resize_image = image.resize((size, size))
#resize_image_name = image_name.split('.')[0]+'_'+str(size)
resize_image_name = image_name.split('.')[0]
resize_image_path = join(path_output,resize_image_name+".png")
resize_image.save(resize_image_path)
path_input = r'D:\108RE\第二篇論文\BOWS_PNG-rename\BOWS_PNG10000'
#size = int(input("input size : "))
for size in (256, 128, 64, 32, 16, 8):
    path_output = path_input + str(size) + '-' + str(size)
    if not os.path.exists(path_output):  # os.path.isfile never matches a directory, so mkdir could fail on reruns
        mkdir(path_output)
    batch_resize(path_input,path_output,size)
```
|
{
"source": "jellc/verification-helper",
"score": 3
}
|
#### File: onlinejudge_verify/languages/models.py
```python
import abc
import pathlib
from typing import *
from onlinejudge_verify.languages.special_comments import list_special_comments
class LanguageEnvironment(object):
@abc.abstractmethod
def compile(self, path: pathlib.Path, *, basedir: pathlib.Path, tempdir: pathlib.Path) -> None:
"""
:throws Exception:
"""
raise NotImplementedError
@abc.abstractmethod
def get_execute_command(self, path: pathlib.Path, *, basedir: pathlib.Path, tempdir: pathlib.Path) -> List[str]:
raise NotImplementedError
class Language(object):
def list_attributes(self, path: pathlib.Path, *, basedir: pathlib.Path) -> Dict[str, str]:
"""
:throws Exception:
"""
return list_special_comments(path)
@abc.abstractmethod
def list_dependencies(self, path: pathlib.Path, *, basedir: pathlib.Path) -> List[pathlib.Path]:
"""
:throws Exception:
"""
raise NotImplementedError
@abc.abstractmethod
def bundle(self, path: pathlib.Path, *, basedir: pathlib.Path) -> bytes:
"""
:throws Exception:
:throws NotImplementedError:
"""
raise NotImplementedError
def is_verification_file(self, path: pathlib.Path, *, basedir: pathlib.Path) -> bool:
return '.test.' in path.name
@abc.abstractmethod
def list_environments(self, path: pathlib.Path, *, basedir: pathlib.Path) -> Sequence[LanguageEnvironment]:
raise NotImplementedError
```
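For context, a concrete implementation of this interface might look like the sketch below. It is purely hypothetical: `ShellLanguage`, `ShellEnvironment`, and the `bash` command are illustrative and not part of this repository; the sketch assumes `Language` and `LanguageEnvironment` from the module above are importable.
```python
import pathlib
from typing import List, Sequence

class ShellEnvironment(LanguageEnvironment):
    def compile(self, path: pathlib.Path, *, basedir: pathlib.Path, tempdir: pathlib.Path) -> None:
        pass  # shell scripts need no compilation step

    def get_execute_command(self, path: pathlib.Path, *, basedir: pathlib.Path, tempdir: pathlib.Path) -> List[str]:
        return ["bash", str(path)]

class ShellLanguage(Language):
    def list_dependencies(self, path: pathlib.Path, *, basedir: pathlib.Path) -> List[pathlib.Path]:
        return [path]  # no dependency analysis in this sketch

    def bundle(self, path: pathlib.Path, *, basedir: pathlib.Path) -> bytes:
        raise NotImplementedError  # bundling is optional per the docstring above

    def list_environments(self, path: pathlib.Path, *, basedir: pathlib.Path) -> Sequence[LanguageEnvironment]:
        return [ShellEnvironment()]
```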
#### File: onlinejudge_verify/languages/special_comments.py
```python
import functools
import pathlib
import re
from logging import getLogger
from typing import *
logger = getLogger(__name__)
# special comments like Vim and Python: see https://www.python.org/dev/peps/pep-0263/
@functools.lru_cache(maxsize=None)
def list_special_comments(path: pathlib.Path) -> Dict[str, str]:
pattern = re.compile(r'\bverify-helper:\s*([0-9A-Z_]+)(?:\s(.*))?$')
failure_pattern = re.compile(r'\bverify-helper:')
attributes = {}
with open(path, 'rb') as fh:
for line in fh.read().decode().splitlines():
matched = pattern.search(line)
if matched:
key = matched.group(1)
value = (matched.group(2) or '').strip()
attributes[key] = value
elif failure_pattern.search(line):
logger.warning('broken verify-helper special comment found: %s', line)
return attributes
@functools.lru_cache(maxsize=None)
def list_doxygen_annotations(path: pathlib.Path) -> Dict[str, str]:
pattern = re.compile(r'@(title|category|brief|docs|see|sa|ignore) (.*)')
attributes = {}
with open(path, 'rb') as fh:
for line in fh.read().decode().splitlines():
matched = pattern.search(line)
if matched:
key = matched.group(1)
value = matched.group(2).strip()
if key == 'docs':
attributes['_deprecated_at_docs'] = value
logger.warning('deprecated annotation: "@%s %s" in %s. use front-matter style instead', key, value, str(path))
elif key in ('title', 'brief'):
if 'document_title' in attributes:
continue
attributes['document_title'] = value
elif key in ('category', 'see', 'sa', 'ignore'):
logger.debug('ignored annotation: "@%s %s" in %s', key, value, str(path))
else:
assert False
return attributes
```
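A small usage sketch of the two parsers above; the file content, suffix, and problem URL are made-up examples chosen to match the regular expressions in this module.
```python
import pathlib
import tempfile

source = b"// verify-helper: PROBLEM https://judge.yosupo.jp/problem/aplusb\n// @brief Example solution\n"
with tempfile.NamedTemporaryFile(suffix=".cpp", delete=False) as tmp:
    tmp.write(source)
path = pathlib.Path(tmp.name)

print(list_special_comments(path))     # {'PROBLEM': 'https://judge.yosupo.jp/problem/aplusb'}
print(list_doxygen_annotations(path))  # {'document_title': 'Example solution'}
```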
|
{
"source": "JelleAalbers/eagerpy",
"score": 2
}
|
#### File: eagerpy/tensor/numpy.py
```python
from typing import (
Tuple,
cast,
Union,
Any,
Iterable,
Optional,
overload,
Callable,
TYPE_CHECKING,
)
from typing_extensions import Literal
import numpy as np
from ..types import Axes, AxisAxes, Shape, ShapeOrScalar
from .tensor import TensorType
from .tensor import Tensor
from .tensor import TensorOrScalar
from .base import BaseTensor
from .base import unwrap_
from .base import unwrap1
if TYPE_CHECKING:
from .extensions import NormsMethods # noqa: F401
def assert_bool(x: Any) -> None:
if not isinstance(x, Tensor):
return
if x.dtype != np.dtype("bool"):
raise ValueError(f"requires dtype bool, got {x.dtype}, consider t.bool().all()")
class NumPyTensor(BaseTensor):
__slots__ = ()
# more specific types for the extensions
norms: "NormsMethods[NumPyTensor]"
def __init__(self, raw: "np.ndarray"): # type: ignore
super().__init__(raw)
@property
def raw(self) -> "np.ndarray": # type: ignore
return super().raw
def numpy(self: TensorType) -> Any:
a = self.raw.view()
if a.flags.writeable:
# without the check, we would attempt to set it on array
# scalars, and that would fail
a.flags.writeable = False
return a
def item(self) -> Union[int, float, bool]:
return self.raw.item() # type: ignore
@property
def shape(self: TensorType) -> Shape:
return cast(Tuple, self.raw.shape)
def reshape(self: TensorType, shape: Union[Shape, int]) -> TensorType:
if isinstance(shape, int):
shape = (shape,)
return type(self)(self.raw.reshape(shape))
def astype(self: TensorType, dtype: Any) -> TensorType:
return type(self)(self.raw.astype(dtype))
def clip(self: TensorType, min_: float, max_: float) -> TensorType:
return type(self)(np.clip(self.raw, min_, max_))
def square(self: TensorType) -> TensorType:
return type(self)(np.square(self.raw))
def arctanh(self: TensorType) -> TensorType:
return type(self)(np.arctanh(self.raw))
def sum(
self: TensorType, axis: Optional[AxisAxes] = None, keepdims: bool = False
) -> TensorType:
return type(self)(self.raw.sum(axis=axis, keepdims=keepdims))
def prod(
self: TensorType, axis: Optional[AxisAxes] = None, keepdims: bool = False
) -> TensorType:
return type(self)(self.raw.prod(axis=axis, keepdims=keepdims))
def mean(
self: TensorType, axis: Optional[AxisAxes] = None, keepdims: bool = False
) -> TensorType:
if self.raw.dtype not in [np.float16, np.float32, np.float64]:
raise ValueError(
f"Can only calculate the mean of floating types. Got {self.raw.dtype} instead."
)
return type(self)(self.raw.mean(axis=axis, keepdims=keepdims))
def min(
self: TensorType, axis: Optional[AxisAxes] = None, keepdims: bool = False
) -> TensorType:
return type(self)(self.raw.min(axis=axis, keepdims=keepdims))
def max(
self: TensorType, axis: Optional[AxisAxes] = None, keepdims: bool = False
) -> TensorType:
return type(self)(self.raw.max(axis=axis, keepdims=keepdims))
def minimum(self: TensorType, other: TensorOrScalar) -> TensorType:
return type(self)(np.minimum(self.raw, unwrap1(other)))
def maximum(self: TensorType, other: TensorOrScalar) -> TensorType:
return type(self)(np.maximum(self.raw, unwrap1(other)))
def argmin(self: TensorType, axis: Optional[int] = None) -> TensorType:
return type(self)(self.raw.argmin(axis=axis))
def argmax(self: TensorType, axis: Optional[int] = None) -> TensorType:
return type(self)(self.raw.argmax(axis=axis))
def argsort(self: TensorType, axis: int = -1) -> TensorType:
return type(self)(self.raw.argsort(axis=axis))
def sort(self: TensorType, axis: int = -1) -> TensorType:
return type(self)(np.sort(self.raw, axis=axis))
def topk(
self: TensorType, k: int, sorted: bool = True
) -> Tuple[TensorType, TensorType]:
idx = np.take(np.argpartition(self.raw, k - 1), np.arange(-k, 0), axis=-1)
val = np.take_along_axis(self.raw, idx, axis=-1)
if sorted:
perm = np.flip(np.argsort(val, axis=-1), axis=-1)
idx = np.take_along_axis(idx, perm, axis=-1)
val = np.take_along_axis(self.raw, idx, axis=-1)
return type(self)(val), type(self)(idx)
def uniform(
self: TensorType, shape: ShapeOrScalar, low: float = 0.0, high: float = 1.0
) -> TensorType:
return type(self)(np.random.uniform(low, high, size=shape))
def normal(
self: TensorType, shape: ShapeOrScalar, mean: float = 0.0, stddev: float = 1.0
) -> TensorType:
return type(self)(np.random.normal(mean, stddev, size=shape))
def ones(self: TensorType, shape: ShapeOrScalar) -> TensorType:
return type(self)(np.ones(shape, dtype=self.raw.dtype))
def zeros(self: TensorType, shape: ShapeOrScalar) -> TensorType:
return type(self)(np.zeros(shape, dtype=self.raw.dtype))
def ones_like(self: TensorType) -> TensorType:
return type(self)(np.ones_like(self.raw))
def zeros_like(self: TensorType) -> TensorType:
return type(self)(np.zeros_like(self.raw))
def full_like(self: TensorType, fill_value: float) -> TensorType:
return type(self)(np.full_like(self.raw, fill_value))
def onehot_like(
self: TensorType, indices: TensorType, *, value: float = 1
) -> TensorType:
if self.ndim != 2:
raise ValueError("onehot_like only supported for 2D tensors")
if indices.ndim != 1:
raise ValueError("onehot_like requires 1D indices")
if len(indices) != len(self):
raise ValueError("length of indices must match length of tensor")
x = np.zeros_like(self.raw)
rows = np.arange(len(x))
x[rows, indices.raw] = value
return type(self)(x)
def from_numpy(self: TensorType, a: Any) -> TensorType:
return type(self)(np.asarray(a))
def _concatenate(
self: TensorType, tensors: Iterable[TensorType], axis: int = 0
) -> TensorType:
# concatenates only "tensors", but not "self"
tensors_ = unwrap_(*tensors)
return type(self)(np.concatenate(tensors_, axis=axis))
def _stack(
self: TensorType, tensors: Iterable[TensorType], axis: int = 0
) -> TensorType:
# stacks only "tensors", but not "self"
tensors_ = unwrap_(*tensors)
return type(self)(np.stack(tensors_, axis=axis))
def transpose(self: TensorType, axes: Optional[Axes] = None) -> TensorType:
if axes is None:
axes = tuple(range(self.ndim - 1, -1, -1))
return type(self)(np.transpose(self.raw, axes=axes))
def all(
self: TensorType, axis: Optional[AxisAxes] = None, keepdims: bool = False
) -> TensorType:
assert_bool(self)
return type(self)(self.raw.all(axis=axis, keepdims=keepdims))
def any(
self: TensorType, axis: Optional[AxisAxes] = None, keepdims: bool = False
) -> TensorType:
assert_bool(self)
return type(self)(self.raw.any(axis=axis, keepdims=keepdims))
def logical_and(self: TensorType, other: TensorOrScalar) -> TensorType:
assert_bool(self)
assert_bool(other)
return type(self)(np.logical_and(self.raw, unwrap1(other)))
def logical_or(self: TensorType, other: TensorOrScalar) -> TensorType:
assert_bool(self)
assert_bool(other)
return type(self)(np.logical_or(self.raw, unwrap1(other)))
def logical_not(self: TensorType) -> TensorType:
assert_bool(self)
return type(self)(np.logical_not(self.raw))
def exp(self: TensorType) -> TensorType:
return type(self)(np.exp(self.raw))
def log(self: TensorType) -> TensorType:
return type(self)(np.log(self.raw))
def log2(self: TensorType) -> TensorType:
return type(self)(np.log2(self.raw))
def log10(self: TensorType) -> TensorType:
return type(self)(np.log10(self.raw))
def log1p(self: TensorType) -> TensorType:
return type(self)(np.log1p(self.raw))
def tile(self: TensorType, multiples: Axes) -> TensorType:
multiples = unwrap1(multiples)
if len(multiples) != self.ndim:
raise ValueError("multiples requires one entry for each dimension")
return type(self)(np.tile(self.raw, multiples))
def softmax(self: TensorType, axis: int = -1) -> TensorType:
# for numerical reasons we subtract the max logit
# (mathematically it doesn't matter!)
# otherwise exp(logits) might become too large or too small
logits = self.raw
logits = logits - logits.max(axis=axis, keepdims=True)
e = np.exp(logits)
return type(self)(e / e.sum(axis=axis, keepdims=True))
def log_softmax(self: TensorType, axis: int = -1) -> TensorType:
# for numerical reasons we subtract the max logit
# (mathematically it doesn't matter!)
# otherwise exp(logits) might become too large or too small
logits = self.raw
logits = logits - logits.max(axis=axis, keepdims=True)
log_sum_exp = np.log(np.exp(logits).sum(axis=axis, keepdims=True))
return type(self)(logits - log_sum_exp)
def squeeze(self: TensorType, axis: Optional[AxisAxes] = None) -> TensorType:
return type(self)(self.raw.squeeze(axis=axis))
def expand_dims(self: TensorType, axis: int) -> TensorType:
return type(self)(np.expand_dims(self.raw, axis=axis))
def full(self: TensorType, shape: ShapeOrScalar, value: float) -> TensorType:
return type(self)(np.full(shape, value, dtype=self.raw.dtype))
def index_update(
self: TensorType, indices: Any, values: TensorOrScalar
) -> TensorType:
indices, values = unwrap_(indices, values)
if isinstance(indices, tuple):
indices = unwrap_(*indices)
x = self.raw.copy()
x[indices] = values
return type(self)(x)
def arange(
self: TensorType,
start: int,
stop: Optional[int] = None,
step: Optional[int] = None,
) -> TensorType:
return type(self)(np.arange(start, stop, step))
def cumsum(self: TensorType, axis: Optional[int] = None) -> TensorType:
return type(self)(self.raw.cumsum(axis=axis))
def flip(self: TensorType, axis: Optional[AxisAxes] = None) -> TensorType:
return type(self)(np.flip(self.raw, axis=axis))
def meshgrid(
self: TensorType, *tensors: TensorType, indexing: str = "xy"
) -> Tuple[TensorType, ...]:
tensors = unwrap_(*tensors)
outputs = np.meshgrid(self.raw, *tensors, indexing=indexing)
return tuple(type(self)(out) for out in outputs)
def pad(
self: TensorType,
paddings: Tuple[Tuple[int, int], ...],
mode: str = "constant",
value: float = 0,
) -> TensorType:
if len(paddings) != self.ndim:
raise ValueError("pad requires a tuple for each dimension")
for p in paddings:
if len(p) != 2:
raise ValueError("pad requires a tuple for each dimension")
if not (mode == "constant" or mode == "reflect"):
raise ValueError("pad requires mode 'constant' or 'reflect'")
if mode == "reflect":
# PyTorch's pad has limited support for 'reflect' padding
if self.ndim != 3 and self.ndim != 4:
raise NotImplementedError # pragma: no cover
k = self.ndim - 2
if paddings[:k] != ((0, 0),) * k:
raise NotImplementedError # pragma: no cover
if mode == "constant":
return type(self)(
np.pad(self.raw, paddings, mode=mode, constant_values=value)
)
else:
return type(self)(np.pad(self.raw, paddings, mode=mode))
def isnan(self: TensorType) -> TensorType:
return type(self)(np.isnan(self.raw))
def isinf(self: TensorType) -> TensorType:
return type(self)(np.isinf(self.raw))
def crossentropy(self: TensorType, labels: TensorType) -> TensorType:
if self.ndim != 2:
raise ValueError("crossentropy only supported for 2D logits tensors")
if self.shape[:1] != labels.shape:
raise ValueError("labels must be 1D and must match the length of logits")
# for numerical reasons we subtract the max logit
# (mathematically it doesn't matter!)
# otherwise exp(logits) might become too large or too small
logits = self.raw
logits = logits - logits.max(axis=1, keepdims=True)
e = np.exp(logits)
s = np.sum(e, axis=1)
ces = np.log(s) - np.take_along_axis(
logits, labels.raw[:, np.newaxis], axis=1
).squeeze(axis=1)
return type(self)(ces)
def slogdet(self: TensorType) -> Tuple[TensorType, TensorType]:
sign, logabsdet = np.linalg.slogdet(self.raw)
return type(self)(sign), type(self)(logabsdet)
@overload
def _value_and_grad_fn(
self: TensorType, f: Callable[..., TensorType]
) -> Callable[..., Tuple[TensorType, TensorType]]:
...
@overload # noqa: F811 (waiting for pyflakes > 2.1.1)
def _value_and_grad_fn(
self: TensorType, f: Callable[..., TensorType], has_aux: Literal[False]
) -> Callable[..., Tuple[TensorType, TensorType]]:
...
@overload # noqa: F811 (waiting for pyflakes > 2.1.1)
def _value_and_grad_fn(
self: TensorType,
f: Callable[..., Tuple[TensorType, Any]],
has_aux: Literal[True],
) -> Callable[..., Tuple[TensorType, Any, TensorType]]:
...
def _value_and_grad_fn( # noqa: F811 (waiting for pyflakes > 2.1.1)
self: TensorType, f: Callable, has_aux: bool = False
) -> Callable[..., Tuple]:
# TODO: maybe implement this using https://github.com/HIPS/autograd
raise NotImplementedError # pragma: no cover
def sign(self: TensorType) -> TensorType:
return type(self)(np.sign(self.raw))
def sqrt(self: TensorType) -> TensorType:
return type(self)(np.sqrt(self.raw))
def tanh(self: TensorType) -> TensorType:
return type(self)(np.tanh(self.raw))
def float32(self: TensorType) -> TensorType:
return self.astype(np.float32)
def float64(self: TensorType) -> TensorType:
return self.astype(np.float64)
def where(self: TensorType, x: TensorOrScalar, y: TensorOrScalar) -> TensorType:
x, y = unwrap_(x, y)
return type(self)(np.where(self.raw, x, y))
def __lt__(self: TensorType, other: TensorOrScalar) -> TensorType:
return type(self)(self.raw.__lt__(unwrap1(other)))
def __le__(self: TensorType, other: TensorOrScalar) -> TensorType:
return type(self)(self.raw.__le__(unwrap1(other)))
def __eq__(self: TensorType, other: TensorOrScalar) -> TensorType: # type: ignore
return type(self)(self.raw.__eq__(unwrap1(other)))
def __ne__(self: TensorType, other: TensorOrScalar) -> TensorType: # type: ignore
return type(self)(self.raw.__ne__(unwrap1(other)))
def __gt__(self: TensorType, other: TensorOrScalar) -> TensorType:
return type(self)(self.raw.__gt__(unwrap1(other)))
def __ge__(self: TensorType, other: TensorOrScalar) -> TensorType:
return type(self)(self.raw.__ge__(unwrap1(other)))
def __getitem__(self: TensorType, index: Any) -> TensorType:
if isinstance(index, tuple):
index = tuple(x.raw if isinstance(x, Tensor) else x for x in index)
elif isinstance(index, Tensor):
index = index.raw
return type(self)(self.raw[index])
def take_along_axis(self: TensorType, index: TensorType, axis: int) -> TensorType:
if axis % self.ndim != self.ndim - 1:
raise NotImplementedError(
"take_along_axis is currently only supported for the last axis"
)
return type(self)(np.take_along_axis(self.raw, index.raw, axis=axis))
def bool(self: TensorType) -> TensorType:
return self.astype(np.dtype("bool"))
```
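A minimal usage sketch of the wrapper above (assuming the surrounding eagerpy package is importable so that `NumPyTensor` can be constructed directly):
```python
import numpy as np

t = NumPyTensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
print(t.shape)                               # (2, 3)
print(t.sum(axis=1).raw)                     # row sums: 6 and 15
print(t.softmax(axis=-1).raw.sum(axis=-1))   # each row of the softmax sums to 1
```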
|
{
"source": "JelleAalbers/fastubl",
"score": 2
}
|
#### File: fastubl/fastubl/wilks_hist.py
```python
from multihist import Hist1d
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy import stats
__all__ = ['wilks_hist']
default_percentiles = (
(50, '50%'),
(90, '90%'),
    (100 * (1 - stats.norm.cdf(-2)), r'2$\sigma$'),
    (100 * (1 - stats.norm.cdf(-3)), r'3$\sigma$'))
default_bins = np.linspace(-1, 15, 100)
theory_colors = dict(wilks='darkorange',
chernoff='seagreen')
def wilks_hist(result, bins=None,
show_percentiles=None,
show_theory=('wilks',)):
if show_percentiles is None:
show_percentiles = default_percentiles
if not show_percentiles:
show_percentiles = tuple()
if isinstance(show_theory, str):
show_theory = (show_theory,)
if bins is None:
bins = default_bins
h = Hist1d(result, bins=bins)
x = h.bin_centers
y = h.histogram
plt.fill_between(x, y - y ** 0.5, y + y ** 0.5,
color='b', label='Simulation',
alpha=0.4, step='mid', linewidth=0)
    plt.plot(x, y, drawstyle='steps-mid', color='b', linewidth=0.5)  # drawstyle (not linestyle) is how current matplotlib selects step plots
wilks_dist = stats.chi2(1)
wilks_y = np.diff(wilks_dist.cdf(bins)) * h.n
chernoff_y0 = (lookup(0, x, wilks_y) + h.n) / 2
if 'wilks' in show_theory:
plt.plot(x,
wilks_y,
color=theory_colors['wilks'], label='Wilks')
if 'chernoff' in show_theory:
plt.plot(x,
wilks_y / 2,
color=theory_colors['chernoff'], label='Chernoff')
plt.scatter(0, chernoff_y0,
marker='.', color=theory_colors['chernoff'])
plt.yscale('log')
plt.ylabel("Toys / bin")
plt.ylim(0.8, None)
plt.gca().yaxis.set_major_formatter(
matplotlib.ticker.FormatStrFormatter('%g'))
plt.xlabel("-2 $\log ( L({\mu_s}^{*}) / L(\hat{\mu_s}) )$")
plt.xlim(h.bin_edges[0], h.bin_edges[-1])
plt.legend(loc='upper right')
ax = plt.gca()
t1 = ax.transData
t2 = ax.transAxes.inverted()
def data_to_axes(x, y):
return t2.transform(t1.transform((x, y)))
def pc_line(x, y, label=None, color='b', alpha=0.8):
plt.axvline(x,
ymax=data_to_axes(x, y)[1],
color=color, alpha=alpha, linewidth=0.5)
if label:
plt.text(x + 0.15, .9, label,
rotation=90,
horizontalalignment='left',
verticalalignment='bottom')
for pc, label in show_percentiles:
x = np.percentile(result, pc)
y = h.lookup(x)
pc_line(x, y, label=label, color='k', alpha=1)
if 'wilks' in show_theory:
x = wilks_dist.ppf(pc / 100)
y = lookup(x, h.bin_centers, wilks_y)
pc_line(x, y, color=theory_colors['wilks'])
if 'chernoff' in show_theory:
if pc <= 50:
x = 0
y = chernoff_y0
else:
x = wilks_dist.ppf(1 - 2 * (1 - pc/100))
y = lookup(x, h.bin_centers, wilks_y) / 2
pc_line(x, y, color=theory_colors['chernoff'])
def lookup(x, xp, yp):
return yp[np.argmin(np.abs(x - xp))]
```
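A quick usage sketch: the toy values below are drawn from a chi2(1) distribution purely for illustration; in practice `result` would hold -2 log likelihood ratios from real toy fits.
```python
import matplotlib.pyplot as plt
from scipy import stats

toy_results = stats.chi2(1).rvs(size=10_000, random_state=42)  # stand-in for real toy statistics
wilks_hist(toy_results, show_theory=('wilks', 'chernoff'))
plt.show()
```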
|
{
"source": "JelleAalbers/hypney",
"score": 2
}
|
#### File: hypney/estimators/confidence_interval.py
```python
from functools import partial
import numpy as np
from scipy import optimize, interpolate
import hypney
export, __all__ = hypney.exporter()
@export
class EmptyIntervalError(Exception):
"""Raised when empty interval would be returned
(possible, but also possible anchors badly chosen)
"""
pass
@export
class FullIntervalError(Exception):
"""Raised when the whole real line would be returned
(possible, but also possible anchors badly chosen)
"""
pass
@export
class ConfidenceInterval:
def __init__(
self,
stat,
poi=hypney.DEFAULT_RATE_PARAM.name,
cl=0.9,
sign=1,
anchors=None,
use_cdf=False,
ppf_fudge=0,
ppf_interpolation=None,
):
"""
Args:
- ppf_interpolation: can be
None to just actually ppf at non-anchor values
'linear' to linearly interpolate ppf(quantile) between anchors
'loglog' linearly interpolate log(ppf[log(quantile)])
"""
self.stat = stat
self.poi = poi
self.cl = cl
self.sign = sign
self.use_cdf = use_cdf
self.ppf_fudge = ppf_fudge
self._ppf_interpolation = ppf_interpolation
self.poi_spec = self.stat.model.param_spec_for(poi)
if not self.stat.dist:
raise ValueError(
"Statistic has no distribution, cannot set confidence intervals"
)
# Collect anchors
user_gave_anchors = bool(anchors)
        # (conditions are wordy since np.array has no truth value)
if anchors is None or not len(anchors):
# Get anchors from the (reparametrized) distribution
# (these may e.g. be present if the dist was generated from toys)
anchors = self.stat.dist.param_spec_for(poi).anchors
if anchors is None or not len(anchors):
# No anchors in dist; try the model instead.
anchors = self.poi_spec.anchors
if anchors is None or not len(anchors):
# If bounds on param are finite, use them as anchors
bounds = np.array([self.poi_spec.min, self.poi_spec.max])
if np.all(np.isfinite(bounds)):
anchors = bounds
if anchors is None or not len(anchors):
raise ValueError("Provide anchors to initially evaluate poi on")
anchors = np.asarray(hypney.utils.eagerpy.ensure_numpy(anchors))
        if not user_gave_anchors and hasattr(self.stat, "bestfit"):
            # Add bestfit of POI as an anchor
            anchors = np.concatenate([anchors, [self.stat.bestfit[poi]]])
self.anchors = np.sort(anchors)
# +1 for upper limit on statistic that (on large scales)
# takes higher-percentile values as the POI grows (like count)
self.combined_sign = self.sign * self.side
if self.combined_sign > 0:
# Counterintuitive, but see Neyman belt construction diagram.
self.crit_quantile = 1 - self.cl
else:
self.crit_quantile = self.cl
if self.use_cdf:
# We will use the CDF to transform statistics to p-values
# Can't do much here, have to wait for data.
self._cdf = stat.dist.cdf
self._ppf_pre_fudge = self._trivial_ppf
self.crit_at_anchors = np.full(len(self.anchors), self.crit_quantile)
else:
# We will use the ppf to find critical value of statistic
# Won't need the cdf, set it to identity.
# Can compute critical value at anchors already here,
# (so won't need to repeat it when testing several datasets)
self._cdf = self._trivial_cdf
self._ppf_pre_fudge = stat.dist(quantiles=self.crit_quantile).ppf
# Find critical value (=corresponding to quantile crit_quantile) at anchors.
self.crit_at_anchors = self._ppf(params={self.poi: self.anchors})
if self._ppf_interpolation:
x, y = self.anchors, self.crit_at_anchors
if self._ppf_interpolation == "linear":
self._ppf_pre_fudge = interpolate.interp1d(x, y)
elif self._ppf_interpolation == "loglog":
self._ppf_pre_fudge = hypney.utils.interpolation.interp1d_loglog(
x, y
)
else:
raise ValueError(
f"Unknown ppf interpolations strategy {self._ppf_interpolation}"
)
def _ppf(self, *args, **kwargs):
return self.ppf_fudge + self._ppf_pre_fudge(*args, **kwargs)
def _trivial_cdf(self, data, params):
return data
def _trivial_ppf(self, params):
return self.crit_quantile + self.ppf_fudge
def __call__(self, data=hypney.NotChanged):
stat = self.stat(data=data)
# Evaluate statistic at anchors
# (statistic is vectorized over params)
anchor_pars = {self.poi: stat.model._to_tensor(self.anchors).raw}
stat_at_anchors = stat.compute(params=anchor_pars)
if self.use_cdf:
# Use CDF to transform statistic to a p-value
stat_at_anchors = np.array(
[
stat.dist.cdf(data=stat_val, params={self.poi: x})
for x, stat_val in zip(self.anchors, stat_at_anchors)
]
)
crit_minus_stat = self.crit_at_anchors - stat_at_anchors
isnan = np.isnan(crit_minus_stat)
if np.any(isnan):
raise ValueError(
f"statistic or critical value NaN at {self.anchors[isnan]}"
)
# sign+1 => upper limit is above the highest anchor for which
# crit - stat <= 0 (i.e. crit too low, so still in interval)
still_in = np.where(self.combined_sign * crit_minus_stat <= 0)[0]
if not len(still_in):
raise EmptyIntervalError(
f"None of the anchors {self.anchors} are inside the confidence interval"
)
if self.side > 0:
ileft = still_in[-1]
if ileft == len(self.anchors) - 1:
# Highest possible value is still in interval.
if self.anchors[-1] == self.poi_spec.max:
# Fine, since it's the maximum possible value
return self.anchors[-1]
else:
raise FullIntervalError(
f"Can't compute upper limit, highest anchor {self.anchors[-1]} still in interval"
)
iright = ileft + 1
else:
iright = still_in[0]
if iright == 0:
# Lowest possible value is still in interval.
if self.anchors[0] == self.poi_spec.min:
# Fine, since it's the minimum possible value
return self.anchors[0]
else:
raise ValueError(
f"Can't compute lower limit, lowest anchor {self.anchors[0]} still in interval"
)
ileft = iright - 1
# Find zero of (crit - stat) - tiny_offset
# The offset is needed if crit = stat for an extended length
# e.g. for Count or other discrete-valued statistics.
# TODO: can we use grad? optimize.root takes a jac arg...
# Don't ask about the sign. All four side/sign combinations are tested...
offset = self.sign * 1e-9 * (crit_minus_stat[ileft] - crit_minus_stat[iright])
return optimize.brentq(
partial(self._objective, stat=stat, offset=offset),
self.anchors[ileft],
self.anchors[iright],
)
def _objective(self, x, stat, offset):
params = {self.poi: x}
return (
# One of ppf/cdf is trivial here, depending on self.use_cdf
self._ppf(params=params)
- self._cdf(data=stat.compute(params=params), params=params)
+ offset
)
@export
class UpperLimit(ConfidenceInterval):
side = +1
@export
class LowerLimit(ConfidenceInterval):
side = -1
@export
class CentralInterval:
def __init__(self, *args, cl=0.9, **kwargs):
kwargs["cl"] = 1 - (1 - cl) / 2
self._lower = LowerLimit(*args, **kwargs)
self._upper = UpperLimit(*args, **kwargs)
def __call__(self, data=hypney.NotChanged):
return self._lower(data), self._upper(data)
```
#### File: hypney/models/transform_data.py
```python
import hypney
export, __all__ = hypney.exporter()
@export
class TransformedDataModel(hypney.WrappedModel):
"""Model for data that has been shifted, then scaled.
Args (beyond those of Model):
- orig_model: original model
- shift: constant to add to data
- scale: constant to multiply shifted data
"""
shift = 0.0
scale = 1.0
def _data_from_orig(self, orig_data):
"""Apply to data generated from model"""
return self.scale * (orig_data + self.shift)
def _data_to_orig(self):
"""Return self.data, with reverse of _data_from_orig applied
so it can be fed to original model.
"""
return (self.data / self.scale) - self.shift
def _transform_jac_det(self):
return abs(1 / self.scale)
##
# Initialization
##
def __init__(
self, *args, shift=hypney.NotChanged, scale=hypney.NotChanged, **kwargs
):
if shift is not hypney.NotChanged:
self.shift = shift
if scale is not hypney.NotChanged:
self.scale = scale
super().__init__(*args, **kwargs)
def _init_data(self):
if "data" not in self._orig_already_has:
self._orig_model = self._orig_model(data=self._data_to_orig())
def _init_quantiles(self):
# ppf not implemented yet
pass
# Simulation
def _simulate(self, params):
return self._data_from_orig(self._orig_model._simulate(params))
def _rvs(self, size: int, params: dict):
return self._data_from_orig(self._orig_model._rvs(size=size, params=params))
# Methods using data / quantiles
def _pdf(self, params):
return self._orig_model._pdf(params) * self._transform_jac_det()
def _logpdf(self, params):
# Careful with log, the jac_det may be a scalar and confuse eagerpy
return (
self._orig_model._logpdf(params)
+ hypney.utils.eagerpy.astensor(
self._transform_jac_det(), match_type=self.data
).log()
)
def _cdf(self, params):
result = self._orig_model._cdf(params)
if self.scale < 0:
result = 1 - result
return result
def _ppf(self, params):
raise NotImplementedError
# Methods not using data
def _rate(self, params):
return self._orig_model._rate(params)
def _mean(self, params):
return self._data_from_orig(self._orig_model._mean(params))
def _std(self, params: dict):
return self._orig_model._std(params) * self.scale
```
#### File: hypney/hypney/statistic.py
```python
from copy import copy
from concurrent.futures import ProcessPoolExecutor
import functools
import gzip
import os
from pathlib import Path
import pickle
import warnings
import eagerpy as ep
import numpy as np
import hypney
from hypney import NotChanged
export, __all__ = hypney.exporter()
@export
class Statistic:
model: hypney.Model # Model of the data
_dist: hypney.Model = None # Model of the statistic; takes same parameters
@property
def dist(self):
# Just so people won't assign it by accident...
# pass dist=... in __init__ instead.
return self._dist
##
# Initialization
##
def __init__(
self,
model: hypney.Model,
data=NotChanged,
params=NotChanged,
dist=None,
**kwargs,
):
self.model = model
self._set_dist(dist)
if data is NotChanged:
# Do not bypass _set_data; if the model has data,
# we'll want to run _init_data on it
data = self.model.data
self._set_data(data)
self._set_defaults(params, **kwargs)
def _set_dist(self, dist: hypney.Model):
if dist is NotChanged:
return
if dist is None:
if hasattr(self, "_build_dist"):
# Statistic has a default distribution
dist = self._build_dist()
else:
# Leave self.dist at None (some estimators will complain)
assert self._dist is None
return
if isinstance(dist, (str, Path)):
# Load distribution from a pickle
_open = gzip.open if str(dist).endswith(".gz") else open
with _open(dist) as f:
dist = pickle.load(f)
if self._has_redefined("_dist_params"):
# For some statistics (e.g. count), distributions take different
# parameters than the model, specified by _dist_params.
# Thus, wrap dist in an appropriate reparametrization.
# Unfortunately, there is no easy way to preserve any anchors...
dist = dist.reparametrize(
transform_params=self._dist_params, param_specs=self.model.param_specs,
)
# Ensure dist has same defaults as models
self._dist = dist(params=self.model.defaults)
def _set_data(self, data):
if data is NotChanged:
return
self.model = self.model(data=data)
if self.model.data is not None:
self._init_data()
def _init_data(self):
"""Initialize self.data (either from construction or data change)"""
pass
@property
def data(self) -> ep.Tensor:
return self.model.data
def _set_defaults(self, params=NotChanged, **kwargs):
self.model = self.model(params=params, **kwargs)
def set(self, data=NotChanged, dist=NotChanged, params=NotChanged, **kwargs):
"""Return a statistic with possibly changed data or distribution"""
if (
data is NotChanged
and dist is NotChanged
and params is NotChanged
and not kwargs
):
return self
new_self = copy(self)
new_self._set_defaults(params, **kwargs)
new_self._set_dist(dist)
new_self._set_data(data)
return new_self
def _has_redefined(self, method_name, from_base=None):
"""Returns if method_name is redefined from Statistic.method_name"""
if from_base is None:
from_base = Statistic
f = getattr(self, method_name)
if not hasattr(f, "__func__"):
return True
return f.__func__ is not getattr(from_base, method_name)
##
# Computation
##
def __call__(self, data=NotChanged, dist=NotChanged, params=NotChanged, **kwargs):
return self.set(data=data, params=params, **kwargs)
def compute(self, data=NotChanged, params: dict = None, **kwargs) -> ep.TensorType:
self = self.set(data=data)
if self.data is None:
raise ValueError("Data must be set first")
return self.model._scalar_method(self._compute, params=params, **kwargs)
def _compute(self, params):
# data has shape ([n_datasets?], n_events, n_observables)
# params have shape ([batch_shape], 1)
# result has to be shape ([n_datasets?], [batch_shape], 1)
raise NotImplementedError
##
# Simulation
##
def rvs(
self,
size=1,
params=NotChanged,
transform=np.asarray,
nan_on_exception=False,
**kwargs,
) -> np.ndarray:
"""Return statistic evaluated on simulated data,
generated from model with params
Args:
- size: number of toys to draw
- params, **kwargs: parameters at which to simulate toys
- transform: run numpy data through this function before passing
it to statistic. Useful to convert to an autograd library,
e.g. torch.from_numpy / tf.convert_to_tensor.
"""
# Set defaults once to avoid re-validation
self = self.set(params=params, **kwargs)
results = np.zeros(size)
for i in range(size):
sim_data = transform(self.model._simulate(params=self.model.defaults))
if nan_on_exception:
try:
results[i] = self.compute(data=sim_data)
except Exception as e:
warnings.warn(f"Exception during test statistic evaluation: {e}")
results[i] = float("nan")
else:
results[i] = self.compute(data=sim_data)
return results
##
# Distribution
##
def _dist_params(self, params):
"""Return distribution params given model params"""
return params
def dist_from_toys(
self,
params=NotChanged,
n_toys=1000,
transform=np.asarray,
options=None,
nan_on_exception=False,
**kwargs,
):
"""Return an estimated distribution of the statistic given params
from running simulations.
"""
if options is None:
options = dict()
# Use a *lot* of bins by default, since we're most interested
# in the cdf/ppf
options.setdefault("bin_count_multiplier", 10)
options.setdefault("mass_bins", True)
# Set defaults before simulation; helps provide e.g. better minimizer guesses
self = self.set(params=params, **kwargs)
toys = self.rvs(n_toys, transform=transform, nan_on_exception=nan_on_exception)
dist = hypney.models.from_samples(toys, **options)
# Remove all parameters (to avoid confusion with model parameters)
return dist.freeze()
def interpolate_dist_from_toys(
self, anchors: dict, progress=True, methods="ppf", map=map, **kwargs
):
"""Estimate this statistic's distribution by Monte Carlo.
This draws toys at a grid specified by the anchors.
By default, we then interpolate the ppf, since this is what you need
for confidence interval setting.
"""
assert isinstance(anchors, dict), "Pass a dict of sequences as anchors"
if self._has_redefined("_dist_params"):
# Build a distribution that takes the _dist_params
# rather than the model's params.
# Compute new anchors using self._dist_params
if len(anchors) > 1:
raise NotImplementedError(
"Multi-parameter interpolation not supported if _dist_params is nontrivial"
)
# (Since we'd have to transform the whole grid of anchors.
# Even if the transformation is simple enough to allow this,
            # we don't have that grid here yet.)
# (back and forth to tensor necessary to support dist_params that
# do calls -- e.g. Count's dist calls to model._rate)
param_tensors = {k: self.model._to_tensor(v) for k, v in anchors.items()}
dist_anchors = self._dist_params(param_tensors)
dist_anchors = {k: v.numpy().tolist() for k, v in dist_anchors.items()}
# Set up transformation dictionary
# from old (model) to new (dist) anchors
dist_pname = list(dist_anchors.keys())[0]
model_pname = list(anchors.keys())[0]
model_to_dist_anchor = dict(
zip(tuple(anchors[model_pname]), dist_anchors[dist_pname])
)
# The interpolator will work in the new (dist) anchors
# Thus model_builder must transform back to the old (model) anchors
# We cannot define the function here inline, that would break pickle
model_builder = functools.partial(
_transformed_model_builder,
self=self,
model_pname=model_pname,
dist_pname=dist_pname,
model_to_dist_anchor=model_to_dist_anchor,
**kwargs,
)
anchors = dist_anchors
else:
model_builder = functools.partial(self.dist_from_toys, **kwargs)
anchors = anchors
return hypney.models.Interpolation(
model_builder, anchors, progress=progress, map=map, methods=methods,
).fix_except(anchors.keys())
def with_stored_dist(
self,
dist_filename,
n_toys=None,
rate_anchors=hypney.DEFAULT_RATE_GRID,
max_workers=None,
dist_dir="cached_dists",
):
"""Return statistic with distribution loaded from cache_dir,
or rebuilt from toy mc if file does not exist
TODO: describe rate anchors or generalize
"""
if n_toys is None:
n_toys = 10_000
if max_workers is None:
max_workers = min(32, os.cpu_count() - 4)
dist_dir = Path(f"./{dist_dir}/")
dist_dir.mkdir(exist_ok=True)
dist_filename = dist_dir / f"{dist_filename}_{n_toys}.pkl.gz"
if dist_filename.exists():
with gzip.open(dist_filename) as f:
return self.set(dist=pickle.load(f))
else:
mu_min, mu_max = [f(rate_anchors) for f in (min, max)]
print(
f"Building distribution {dist_filename}, {n_toys} toys,"
f"mu in [{mu_min}, {mu_max}]"
)
with ProcessPoolExecutor(max_workers=max_workers) as exc:
dist = self.interpolate_dist_from_toys(
anchors=dict(rate=hypney.DEFAULT_RATE_GRID.tolist()),
n_toys=n_toys,
map=exc.map,
)
with gzip.open(dist_filename, mode="wb") as f:
pickle.dump(dist, f)
return self.set(dist=dist)
def _transformed_model_builder(
dist_params, *, self, model_pname, dist_pname, model_to_dist_anchor, **kwargs
):
model_params = {model_pname: model_to_dist_anchor[dist_params[dist_pname]]}
return self.dist_from_toys(params=model_params, **kwargs)
```
#### File: hypney/statistics/yellin_hawks.py
```python
import gzip
import pickle
import numpy as np
import multihist # pickle contains a multihist
from scipy.interpolate import RegularGridInterpolator
import hypney
from hypney.utils.numba import factorial
from .deficit_hawks import AllRegionFullHawk, AllRegionSimpleHawk
export, __all__ = hypney.exporter()
# TODO: where to store file in repo?
with gzip.open(
"/home/jaalbers/Documents/projects/robust_inference_2/cn_cdf.pkl.gz"
) as f:
mh = pickle.load(f)
# Pad with zero at frac = 0
cdfs = np.pad(mh.histogram, [(0, 0), (0, 0), (1, 0)])
points = mh.bin_centers()
# We cumulated along the 'frac' dimension; values represent
# P(frac <= right bin edge)
points[2] = np.concatenate([[0], mh.bin_edges[2][1:]])
# Full intervals (frac = 1) should not always score 1.
# Linearly interpolate the last cdf bin instead:
cdfs[:, :, -1] = (cdfs[:, :, -2] + (cdfs[:, :, -2] - cdfs[:, :, -3])).clip(0, 1)
p_smaller_x_itp = RegularGridInterpolator(points, cdfs)
itp_max_mu = mh.bin_centers("mu").max()
@export
def p_smaller_itv(n, mu, frac):
"""Probability of finding a largest N-event-containing interval
smaller than frac (i.e. with less fraction of expected signal)
Args:
- n: observed events
- mu: *total* expected events (not expected event in interval!)
- frac: fraction of events expected in interval
"""
# I'm assuming mu has the largest shape. Ravel may be inefficient but
# I think RegularGridInterpolator won't work without it
# TODO: eagerpy-ify this.
mu = hypney.utils.eagerpy.ensure_numpy(mu)
was_float = isinstance(mu, (int, float))
mu = np.asarray(mu)
n = np.asarray(0 * mu + n)
frac = np.asarray(0 * mu + frac)
points = np.stack([mu.ravel(), n.ravel(), frac.ravel()]).T
result = p_smaller_x_itp(points)
result = result.reshape(mu.shape)
if was_float:
return result.item()
return result
@export
class YellinCNHawk(AllRegionSimpleHawk):
def _dist_params(self, params):
# Distribution depends only on # expected events
return dict(mu=self.model._rate(params))
def _compute_scores(self, n, mu, frac):
return -p_smaller_itv(n=n, mu=mu / frac, frac=frac)
##
# Alternate implementation as a full hawk
# use only for testing; it's just slower than YellinCNHawk!
##
@export
class YellinCN(hypney.Statistic):
"""Computes - C_n(x, mu) for one interval
Here, C_n(x, mu) is the probability of finding a largest N-event-containing
interval smaller than frac (i.e. with less fraction of expected signal)
given the true rate mu.
"""
def _compute(self, params):
assert self.model._backend_name == "numpy"
mu = self.model.rate(params)
n = len(self.data)
assert isinstance(self.model, hypney.models.CutModel)
frac = self.model.cut_efficiency(params)
# Minus, since deficit hawks take the minimum over cuts
result = -p_smaller_itv(n=n, mu=mu / frac, frac=frac)
return result
@export
class YellinCNFullHawk(AllRegionFullHawk):
statistic_class = YellinCN
def _dist_params(self, params):
# Distribution depends only on # expected events in the region
return dict(mu=self.model._rate(params))
# Not really needed but useful for testing
@hypney.utils.numba.maybe_jit
def p_smaller_x_0(mu, frac):
# Equation 2 from https://arxiv.org/pdf/physics/0203002.pdf
# The factorial causes OverflowError for sufficiently small/unlikely fracs
# TODO: maybe put 0 instead of raising exception?
x = frac * mu
m = int(np.floor(mu / x))
ks = np.arange(0, m + 1)
return (
(ks * x - mu) ** ks * np.exp(-ks * x) / factorial(ks) * (1 + ks / (mu - ks * x))
).sum()
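# Illustrative sanity check (exact values are not asserted here): for n=0 the
# pure-python reference above and the interpolated table should roughly agree, e.g.
#   p_smaller_x_0(mu=10.0, frac=0.3)
#   p_smaller_itv(n=0, mu=10.0, frac=0.3)
# both estimate P(the largest empty interval holds < 30% of the expected signal).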
```
#### File: hypney/utils/numba.py
```python
import math
import numpy as np
import hypney
export, __all__ = hypney.exporter()
try:
import numba
have_numba = True
except ImportError:
have_numba = False
print("Get numba man, it's great")
@export
def maybe_jit(f):
if have_numba:
return numba.jit(f)
else:
return f
# See https://stackoverflow.com/questions/44346188
# and https://stackoverflow.com/questions/62056035
# (21! and above are no longer 64-bit integers)
FACTORIALS = np.array(
[
1,
1,
2,
6,
24,
120,
720,
5040,
40320,
362880,
3628800,
39916800,
479001600,
6227020800,
87178291200,
1307674368000,
20922789888000,
355687428096000,
6402373705728000,
121645100408832000,
2432902008176640000,
],
dtype="int64",
)
@maybe_jit
def factorial(n):
if np.max(n) > 20:
raise OverflowError("Factorials of n>20 are not int64s")
return FACTORIALS[n]
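# Doctest-style examples of the lookup above (illustrative):
#   >>> factorial(np.array([0, 5, 20]))
#   array([1, 120, 2432902008176640000])
#   >>> factorial(np.array([21]))   # raises OverflowError: 21! does not fit in int64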
```
#### File: tests/models/test_cut.py
```python
import hypney
import numpy as np
from scipy import stats
def test_cut():
m_base = hypney.models.norm()
m_cut = m_base.cut(0, None)
assert isinstance(m_cut.simulate(), np.ndarray)
assert m_cut._cut == ((0, float("inf")),)
assert m_cut.cut_efficiency() == 0.5
assert m_cut.cut_efficiency(loc=1) == stats.norm(loc=1).sf(0)
assert m_cut.rvs(100).min() >= 0
m_half = hypney.models.halfnorm()
x = np.linspace(-5, 5, 10)
np.testing.assert_almost_equal(m_cut.rate(), 0.5)
np.testing.assert_almost_equal(m_cut.diff_rate(x), m_half.diff_rate(x) / 2)
# Extra vectorization tests... last one catches low, high = corner_cdf unpacking bug
np.testing.assert_almost_equal(m_cut.rate(rate=2), 1)
np.testing.assert_almost_equal(m_cut.rate(rate=[2]), np.array([1]))
np.testing.assert_almost_equal(m_cut.cdf(0), 0)
np.testing.assert_almost_equal(m_cut.cdf([0, float("inf")]), np.array([0, 1]))
np.testing.assert_almost_equal(
m_cut.cdf([0, float("inf")], rate=[2, 4]), np.array([[0, 1], [0, 1]])
)
for params in dict(a=0, b=float("inf")), dict(a=-1, b=1):
m_cut = m_base.cut(params["a"], params["b"])
m_trunc = hypney.models.truncnorm(**params)
np.testing.assert_almost_equal(m_cut.pdf(x), m_trunc.pdf(x, params))
np.testing.assert_almost_equal(m_cut.logpdf(x), m_trunc.logpdf(x, params))
np.testing.assert_almost_equal(m_cut.cdf(x), m_trunc.cdf(x, params))
q = np.linspace(0, 1, 100)
np.testing.assert_almost_equal(m_cut.ppf(q), m_trunc.ppf(q, params))
# Test cutting a combined model (caught a bug with cut_efficiency > 1 once)
m = (
hypney.models.uniform().fix_except("rate") + hypney.models.uniform().freeze()
).cut(None, None)
assert m.rate(rate=1) == m.rate(rate=[1])[0]
```
#### File: tests/models/test_interpolated.py
```python
import hypney
import numpy as np
def test_interpolated():
def builder(params):
return hypney.models.uniform(**params, rate=1000)
m = hypney.models.Interpolation(
builder, param_specs=dict(loc=(-0.5, 0, 0.5)), methods=("mean", "pdf")
)
data = m.simulate()
assert len(data)
assert 0.4 < data.mean() < 0.6
assert m.rate() == 1000.0
# TODO: Derive these analytically. But they look plausible.
x = np.array([-0.01, 0.01, 0.49, 0.51, 0.99, 1.01, 1.49, 1.51])
y = np.array([0, 0.6, 0.6, 1.0, 1.0, 0.4, 0.4, 0.0])
m2 = m(data=x)
np.testing.assert_array_almost_equal(m2.pdf(params=dict(loc=0.2)), y)
np.testing.assert_array_almost_equal(m2.diff_rate(params=dict(loc=0.2)), y * 1000.0)
# Test vectorization
locs = np.array([0.2, 0, -0.2])
rate_list = np.array([m2.rate(loc=x) for x in locs])
rate_arr = m2.rate(loc=locs)
np.testing.assert_array_equal(rate_list, rate_arr)
pdf_list = np.stack([m2.pdf(loc=x) for x in locs])
pdf_arr = m2.pdf(loc=locs)
np.testing.assert_array_equal(pdf_list, pdf_arr)
# No, linearly interpolated CDF is not the inverse of the linearly interpolated PPF
# (nor is it the integral of the linearly interpolated PDF.. pretty tricky)
# Test two dimensions of anchors
m2 = hypney.models.Interpolation(
builder,
param_specs=dict(loc=(-0.5, 0, 0.5), scale=(0.5, 1, 1.5)),
data=x,
methods="pdf",
)
# TODO: same here
y = np.array([0.0, 1.6, 1.6, 0.4, 0.4, 0.0, 0.0, 0.0])
np.testing.assert_array_almost_equal(m2.pdf(params=dict(scale=0.7)), y)
```
#### File: tests/utils/test_interpolator.py
```python
import eagerpy as ep
import numpy as np
from scipy.interpolate import RegularGridInterpolator
import hypney
tl = ep.numpy
def test_regular_grid_interpolator():
"""Adapted from
https://github.com/sbarratt/torch_interpolations/blob/master/tests/test_grid_interpolator.py
"""
points = [tl.arange(-0.5, 2.5, 0.1) * 1.0, tl.arange(-0.5, 2.5, 0.2) * 1.0]
values = (
hypney.utils.eagerpy.sin(points[0])[:, None]
+ 2 * hypney.utils.eagerpy.cos(points[1])[None, :]
+ hypney.utils.eagerpy.sin(5 * points[0][:, None] @ points[1][None, :])
)
X, Y = ep.meshgrid(tl.arange(-0.5, 2, 0.1), tl.arange(-0.5, 2, 0.1))
points_to_interp = ep.stack([X.flatten(), Y.flatten()]).T
gi = hypney.utils.interpolation.RegularGridInterpolator(points, values)
fx = gi(points_to_interp)
rgi = RegularGridInterpolator(
[p.numpy() for p in points], [x.numpy() for x in values], bounds_error=False
)
rfx = rgi(points_to_interp.numpy())
np.testing.assert_allclose(rfx, fx.numpy(), atol=1e-6)
# TODO: port derivative test to eagerpy
# note that points_to_interp has to be transposed
#
# def test_regular_grid_interpolator_derivative():
# points = [torch.arange(-.5, 2.5, .5) * 1., torch.arange(-.5, 2.5, .5) * 1.]
# values = torch.sin(points[0])[:, None] + 2 * torch.cos(points[1])[None, :] + torch.sin(5 * points[0][:, None] @ points[1][None, :])
# values.requires_grad_(True)
#
# X, Y = np.meshgrid(np.arange(-.5, 2, .19), np.arange(-.5, 2, .19))
# points_to_interp = [torch.from_numpy(
# X.flatten()).float(), torch.from_numpy(Y.flatten()).float()]
#
# def f(values):
# return torch_interpolations.RegularGridInterpolator(
# points, values)(points_to_interp)
#
# torch.autograd.gradcheck(f, (values,), eps=1e-5, atol=1e-1, rtol=1e-1)
def test_interpolator_builder():
itp = hypney.utils.interpolation.InterpolatorBuilder([(-1, 0, 1)])
def scalar_f(z):
return z[0]
z = ep.astensor(np.array([1, 0, -1, 0, 1, 1, -1]))
scalar_itp = itp.make_interpolator(scalar_f)
np.testing.assert_array_equal(scalar_itp(z).numpy(), z.numpy())
def matrix_f(z):
return ep.astensor(np.ones((2, 2)) * z[0])
matrix_itp = itp.make_interpolator(matrix_f)
np.testing.assert_array_equal(
matrix_itp(z).numpy(), z[:, None, None].numpy() * np.ones((1, 2, 2))
)
# What happened here? Does the test not make sense or did the API change?
# np.testing.assert_array_equal(
# matrix_itp(ep.numpy.array([0, 0, 0])).numpy(),
# np.ones((2, 2)))
```
|
{
"source": "JelleAalbers/plunc",
"score": 4
}
|
#### File: plunc/plunc/common.py
```python
import numpy as np
# TODO: instead of rounding to digits, make a uniform log space where we snap to
def round_to_digits(x, n_digits):
"""Rounds x to leading digits"""
x = float(x) # Doesn't work on numpy floats
return round(x, n_digits - 1 - int(np.log10(x)) + (1 if x < 1 else 0))
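# Doctest-style examples (illustrative; none of these hit an exact rounding tie):
#   >>> round_to_digits(1234.5, 2)
#   1200.0
#   >>> round_to_digits(567.0, 2)
#   570.0
#   >>> round_to_digits(0.0123, 2)
#   0.012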
```
#### File: plunc/intervals/base.py
```python
import numpy as np
import logging
from plunc.common import round_to_digits
from plunc.exceptions import SearchFailedException, InsufficientPrecisionError, OutsideDomainError
from plunc.WaryInterpolator import WaryInterpolator
class IntervalChoice(object):
"""Base interval choice method class
"""
method = 'rank' # 'rank' or 'threshold'
threshold = float('inf')
precision_digits = 2
use_interval_cache = True
wrap_interpolator = True
background = 0
confidence_level = 0.9
max_hypothesis = 1e6
interpolator_log_domain = (-1, 3)
fixed_upper_limit = None
fixed_lower_limit = None
# Use only for testing:
forbid_exact_computation = False
def __init__(self, statistic, **kwargs):
self.statistic = statistic
for k, v in kwargs.items():
setattr(self, k, v)
self.cl = self.confidence_level
self.log = logging.getLogger(self.__class__.__name__)
if self.wrap_interpolator:
self.log.debug("Initializing interpolators")
if self.fixed_lower_limit is None:
self.low_limit_interpolator = WaryInterpolator(precision=10**(-self.precision_digits),
domain=self.interpolator_log_domain)
if self.fixed_upper_limit is None:
self.high_limit_interpolator = WaryInterpolator(precision=10**(-self.precision_digits),
domain=self.interpolator_log_domain)
# "Joints" of the interpolator must have better precision than required of the interpolator results
self.precision_digits += 1
# Dictionary holding "horizontal" intervals: interval on statistic for each precision and hypothesis.
self.cached_intervals = {}
def get_interval_on_statistic(self, hypothesis, precision_digits):
"""Returns the self.cl confidence level interval on self.statistic for the event rate hypothesis
The event rate here includes signal as well as identically distributed background.
Intervals are inclusive = closed.
"""
if self.use_interval_cache and (hypothesis, precision_digits) in self.cached_intervals:
return self.cached_intervals[(hypothesis, precision_digits)]
stat_values, likelihoods = self.statistic.get_values_and_likelihoods(hypothesis,
precision_digits=precision_digits)
likelihoods = likelihoods / np.sum(likelihoods)
# Score each statistic value (method-dependent)
stat_value_scores = self.score_stat_values(statistic_values=stat_values,
likelihoods=likelihoods,
hypothesis=hypothesis)
if self.method == 'threshold':
# Include all statistic values that score higher than some threshold
values_in_interval = stat_values[stat_value_scores > self.get_threshold()]
else:
# Include the values with highest score first, until we reach the desired confidence level
# TODO: wouldn't HIGHEST score first be more user-friendly?
ranks = np.argsort(stat_value_scores)
train_values_sorted = stat_values[ranks]
likelihoods_sorted = likelihoods[ranks]
# Find the last value to include
# (= first value that takes the included probability over the required confidence level)
sum_lhoods = np.cumsum(likelihoods_sorted)
last_index = np.where(sum_lhoods > self.cl)[0][0] # TODO: can fail?
values_in_interval = train_values_sorted[:last_index + 1]
# Limits = extreme values in the interval.
# This means we will be conservative if values_in_interval is not continuous.
low_lim, high_lim = values_in_interval.min(), values_in_interval.max()
# If we included all values given up until a boundary, don't set that boundary as a limit
if low_lim == np.min(stat_values):
low_lim = 0
if high_lim == np.max(stat_values):
high_lim = float('inf')
# Cache and return upper and lower limit on the statistic
if self.use_interval_cache:
self.cached_intervals[(hypothesis, precision_digits)] = low_lim, high_lim
return low_lim, high_lim
def get_confidence_interval(self, value, precision_digits, search_region, debug=False):
"""Performs the Neynman construction to get confidence interval on event rate (mu),
if the statistic is observed to have value
"""
log_value = np.log10(value)
if self.wrap_interpolator:
# Try to interpolate the limit from limits computed earlier
self.log.debug("Trying to get values from interpolators")
try:
if self.fixed_lower_limit is None:
low_limit = 10**(self.low_limit_interpolator(log_value))
else:
low_limit = self.fixed_lower_limit
if self.fixed_upper_limit is None:
high_limit = 10**(self.high_limit_interpolator(log_value))
else:
high_limit = self.fixed_upper_limit
return low_limit, high_limit
except InsufficientPrecisionError:
self.log.debug("Insuffienct precision achieved by interpolators")
if log_value > self.interpolator_log_domain[1]:
self.log.debug("Too high value to dare to start Neyman construction... raising exception")
# It is not safe to do the Neyman construction: too high statistics
raise
self.log.debug("Log value %s is below interpolator log domain max %s "
"=> starting Neyman construction" % (log_value, self.interpolator_log_domain[1]))
except OutsideDomainError:
# The value is below the interpolator domain (e.g. 0 while the domain ends at 10**0 = 1)
pass
if self.forbid_exact_computation:
raise RuntimeError("Exact computation triggered")
def is_value_in(mu):
low_lim, high_lim = self.get_interval_on_statistic(mu + self.background,
precision_digits=precision_digits)
return low_lim <= value <= high_lim
# We first need one value in the interval to bound the limit searches
try:
true_point, low_search_bound, high_search_bound = search_true_instance(is_value_in,
*search_region,
precision_digits=precision_digits)
except SearchFailedException as e:
self.log.debug("Exploratory search could not find a single value in the interval! "
"This is probably a problem with search region, or simply a very extreme case."
"Original exception: %s" % str(e))
if is_value_in(0):
self.log.debug("Oh, ok, only zero is in the interval... Returning (0, 0)")
return 0, 0
return 0, float('inf')
self.log.debug(">>> Exploratory search completed: %s is in interval, "
"search for boundaries in [%s, %s]" % (true_point, low_search_bound, high_search_bound))
if self.fixed_lower_limit is not None:
low_limit = self.fixed_lower_limit
elif is_value_in(low_search_bound):
# If mu=0 can't be excluded, we're apparently only setting an upper limit (mu <= ..)
low_limit = 0
else:
low_limit = bisect_search(is_value_in, low_search_bound, true_point, precision_digits=precision_digits)
self.log.debug(">>> Low limit found at %s" % low_limit)
if self.fixed_upper_limit is not None:
            high_limit = self.fixed_upper_limit
elif is_value_in(high_search_bound):
# If max_mu can't be excluded, we're apparently only setting a lower limit (mu >= ..)
high_limit = float('inf')
else:
high_limit = bisect_search(is_value_in, true_point, high_search_bound, precision_digits=precision_digits)
self.log.debug(">>> High limit found at %s" % high_limit)
if self.wrap_interpolator:
# Add the values to the interpolator, if they are within the domain
# TODO: Think about dealing with inf
if self.interpolator_log_domain[0] <= log_value <= self.interpolator_log_domain[1]:
if self.fixed_lower_limit is None:
self.low_limit_interpolator.add_point(log_value, np.log10(low_limit))
if self.fixed_upper_limit is None:
self.high_limit_interpolator.add_point(log_value, np.log10(high_limit))
return low_limit, high_limit
def score_stat_values(self, **kwargs):
# Return "rank" of each hypothesis. Hypotheses with highest ranks will be included first.
raise NotImplementedError()
def __call__(self, observation, precision_digits=None, search_region=None):
"""Perform Neynman construction to get confidence interval on event rate for observation.
"""
if precision_digits is None:
precision_digits = self.precision_digits
if search_region is None:
search_region = [0, round_to_digits(10 + 3 * len(observation), precision_digits)]
if self.statistic.mu_dependent:
value = self.statistic(observation, self.statistic.mus)
else:
value = self.statistic(observation, None)
self.log.debug("Statistic evaluates to %s" % value)
return self.get_confidence_interval(value, precision_digits=precision_digits, search_region=search_region)
def search_true_instance(f, a, b, precision_digits=3, maxiter=10, log=None):
"""Find x in [a, b] where f is True, limiting search to values with precision_digits significant figures.
Returns x, low_bound, high_bound where low_bound and high_bound are either the search bounds a or b, or closer
values to x where f was still found to be False.
# TODO: If asked for precision_digits=5, first search with precision_digits=1, then 2, etc.
print(search_true_instance(lambda x: 11 < x < 13, 0, 40))
print(search_true_instance(lambda x: x < 13, 0, 1000))
"""
log = logging.getLogger('search_true_instance')
values_searched = [a, b]
log.debug("Starting exploratory search in [%s, %s]" % (a, b))
for iter_i in range(maxiter):
        # First test the halfway point, then 1/4 and 3/4, then 1/8, 3/8, 5/8, 7/8, etc.
fractions = 2**(iter_i + 1)
search_points = [round_to_digits(a + (b - a)*fr, precision_digits)
for fr in np.arange(1, fractions, 2)/fractions]
log.debug("Searching %s - %s (%d points)" % (search_points[0], search_points[-1], len(search_points)))
for x_i, x in enumerate(search_points):
if f(x):
values_searched = np.array(values_searched)
return x, np.max(values_searched[values_searched < x]), np.min(values_searched[values_searched > x])
else:
values_searched.append(x)
if len(search_points) > 1 and np.any(np.diff(search_points) == 0):
raise SearchFailedException("No true value found in search region [%s, %s], "
"but search depth now lower than precision digits (%s). "
"Iteration %d." % (a, b, precision_digits, iter_i))
raise ValueError("Exploratory search failed to converge or terminate - bug? excessive precision?")
def bisect_search(f, a, b, precision_digits=2, maxiter=1e2):
"""Find x in [a, b] where f changes from True to False by bisection,
limiting search to values with precision_digits significant figures.
This is useful if f can cache its results: otherwise just use e.g. scipy.optimize.brentq with rtol.
    Avoid scipy.optimize.bisect with rtol, results seem to depend heavily on initial bounds: bug??
# TODO: If asked for precision_digits=5, first search with precision_digits=1, then 2, etc.
"""
log = logging.getLogger('bisect_search')
# Which of the bounds gives True? Can't be both!
if f(a) == f(b):
raise ValueError("f must not be true or false on both bounds")
true_on_a = f(a)
log.debug("Starting search between %s (%s) and %s (%s)"
" with %d precision digits" % (a, f(a), b, f(b), precision_digits))
# Do a bisection search, sticking to precision_digits
for iter_i in range(int(maxiter)):
# Find the new bisection point
x = (a + b) / 2
x = round_to_digits(x, precision_digits)
# If we are down to a single point, return that
if x == a or x == b:
return x
true_on_x = f(x)
# Update the appropriate bound
if true_on_a:
if true_on_x:
a = x
else:
b = x
else:
if true_on_x:
b = x
else:
a = x
log.debug("Iteration %d, searching between [%s and %s], last x was %s (%s)" % (iter_i, a, b, x, true_on_x))
else:
raise RuntimeError("Infinite loop encountered in bisection search!")
```
#### File: plunc/plunc/WaryInterpolator.py
```python
import logging
import numpy as np
from plunc.exceptions import InsufficientPrecisionError, OutsideDomainError
class WaryInterpolator(object):
"""Interpolate (and optionally extrapolation) between points,
raising exception if error larger than desired precision
"""
def __init__(self,
points=tuple(), values=tuple(),
precision=0.01, domain=(-1, 3),
if_lower='raise', if_higher='extrapolate',):
"""
:param points:
:param values:
:param precision:
:param domain: (low, high) boundaries of region where interpolation is used
If no values are known at the boundaries, the effective boundary is tighter
:param if_lower: 'extrapolate' or 'raise'
:param if_higher:
:param loglevel:
:return:
"""
self.precision = precision
self.domain = domain
self.if_lower = if_lower
self.if_higher = if_higher
self.log = logging.getLogger('WaryInterpolator')
self.points = np.array(points)
self.values = np.array(values)
def __call__(self, x):
self.log.debug("Asked for x = %s" % x)
if len(self.points) < 3:
raise InsufficientPrecisionError("Need at least three datapoints before we can interpolate or extrapolate")
if x < self.domain[0]:
self.log.debug("Below domain boundary")
if self.if_lower == 'extrapolate':
return self.extrapolate(x)
else:
raise OutsideDomainError("Value %s is below the lowest known value %s" % (x, self.points.min()))
elif x > self.domain[1]:
self.log.debug("Above domain boundary")
if self.if_higher == 'extrapolate':
return self.extrapolate(x)
else:
raise OutsideDomainError("Value %s is above the highest known value %s" % (x, self.points.max()))
else:
return self.interpolate(x)
def interpolate(self, x):
if x in self.points:
self.log.debug("Exact value known")
return self.values[np.nonzero(self.points == x)[0][0]]
max_i = len(self.points) - 1
if not self.points.min() < x < self.points.max():
self.log.debug("%s is in domain, but outside the range of known values. Trying extrapolation." % x)
return self.extrapolate(x)
# Find index of nearest known point to the right
nearest_right = np.searchsorted(self.points, x)
assert 0 < nearest_right < len(self.points)
if nearest_right == 1:
self.log.debug("Only one point to left")
y = self.linear_interpolant(x, 0, 1)
y2 = self.linear_interpolant(x, 0, 2)
diff = 2 * (y - y2)
elif nearest_right == max_i:
self.log.debug("Only one point to right")
y = self.linear_interpolant(x, max_i - 1, max_i)
y2 = self.linear_interpolant(x, max_i - 2, max_i)
diff = 2 * (y - y2)
else:
self.log.debug("At least two points on either side")
y = self.linear_interpolant(x, nearest_right - 1, nearest_right)
y2 = self.linear_interpolant(x, nearest_right - 1, nearest_right + 1)
diff = y - y2
self.log.debug("Close interpolation gives y=%s, far gives y=%s.\n"
"Difference factor %s, precision tolerance %s" % (y, y2, abs(diff / y), self.precision))
if abs(diff / y) > self.precision:
raise InsufficientPrecisionError("Interpolation failed: achieved precision %s, required %s" % (
abs(diff/y), self.precision))
self.log.debug("Interpolation is ok, returning result")
return y
def linear_interpolant(self, x, index_low, index_high):
x0 = self.points[index_low]
x1 = self.points[index_high]
y0 = self.values[index_low]
y1 = self.values[index_high]
return y0 + (y1 - y0) * (x - x0)/(x1 - x0)
def extrapolate(self, x):
# TODO: change to linear regression on all points in configurable part (e.g. 5%) of range (for y2, 2x range)
# Now this is very vulnerable to small errors on datapoints if datapoints are close together near edge
# TODO: option to ignore InsufficientPrecisionError and barge ahead anyway
        if x > self.points.max():  # extrapolate upward; otherwise downward
max_i = len(self.points) - 1
y = self.linear_interpolant(x, max_i - 1, max_i)
y2 = self.linear_interpolant(x, max_i - 2, max_i)
else:
y = self.linear_interpolant(x, 0, 1)
y2 = self.linear_interpolant(x, 0, 2)
diff = 2 * (y - y2)
self.log.debug("Close extrapolation gives y=%s, far gives y=%s.\n"
"Difference factor %s, precision tolerance %s" % (y, y2, abs(diff / y), self.precision))
if abs(diff / y) > self.precision:
raise InsufficientPrecisionError("Extrapolation precision %s estimated, "
"but %s required" % (abs(diff / y), self.precision))
return y
def add_point(self, x, y):
self.add_points(np.array([x]), np.array([y]))
def add_points(self, xs, ys):
if not self.domain[0] <= np.min(xs) <= np.max(xs) <= self.domain[1]:
raise ValueError("Points to add must lie in the domain [%s-%s], but you passed values from %s to %s" % (
self.domain[0], self.domain[1], np.min(xs), np.max(xs)))
self.points = np.concatenate((self.points, xs))
self.values = np.concatenate((self.values, ys))
sort_indices = np.argsort(self.points)
self.points = self.points[sort_indices]
self.values = self.values[sort_indices]
    def plot(self):
        """Plot the known points and the interpolated/extrapolated curve over the domain."""
        import matplotlib.pyplot as plt
        x = np.linspace(self.domain[0], self.domain[1], 100)
        y = []
        for q in x:
            try:
                y.append(self(q))
            except (InsufficientPrecisionError, OutsideDomainError):
                y.append(float('nan'))
        plt.plot(x, y)
        plt.plot(self.points, self.values, marker='o')
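# Minimal usage sketch (values chosen so the arithmetic is exact): seed the
# interpolator with a few known points, then query it. Queries whose linear
# estimates disagree by more than `precision` raise InsufficientPrecisionError
# instead of silently returning an inaccurate value.
#   itp = WaryInterpolator(precision=0.01, domain=(0, 10))
#   itp.add_points(np.array([0.0, 1.0, 2.0, 3.0]), np.array([0.0, 2.0, 4.0, 6.0]))
#   itp(1.5)   # -> 3.0, interpolated
#   itp(9.0)   # -> 18.0, extrapolated from the last points (exact because the data is linear)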
```
|
{
"source": "JelleAalbers/strax",
"score": 2
}
|
#### File: strax/tests/test_options.py
```python
import strax
import numpy as np
# Initialize. We test both dt time-fields and time time-field
_dtype_name = 'variable'
_dtype = ('variable 1', _dtype_name)
test_dtype = [(_dtype, np.float64)] + strax.time_fields
def test_overwrite():
@strax.takes_config(
strax.Option('option',
default=False),
)
class BasePlugin(strax.Plugin):
"""The plugin that we will be sub-classing"""
provides = 'base'
dtype = test_dtype
depends_on = tuple()
def compute(self, something):
return np.ones(len(something), dtype=self.dtype)
st = strax.Context(storage=[])
st.register(BasePlugin)
# Keep an account of this lineage hash such that we can compare it later
lineage_base = st.key_for('0', 'base').lineage_hash
try:
@strax.takes_config(
strax.Option('option',
default=True),
)
class CrashPlugin(BasePlugin):
"""
            Subclassing with a different option default will cause a
            RuntimeError
"""
pass
st.register(CrashPlugin)
except RuntimeError:
print('Ran into a RuntimeError because we tried specifying an '
'option twice. This is exactly what we want!')
@strax.takes_config(
strax.Option('option',
default=True,
overwrite=True),
)
class OverWritePlugin(BasePlugin):
"""Only overwrite the option, the rest is the same"""
pass
st.register(OverWritePlugin)
assert st.key_for('0', 'base').lineage_hash != lineage_base, 'Lineage did not change'
p = st.get_single_plugin('0', 'base')
assert p.__class__.__name__ == 'OverWritePlugin'
assert p.config['option'] is True, f'Option was not overwritten: {p.config}'
```
|
{
"source": "Jelleas/Drones",
"score": 3
}
|
#### File: Jelleas/Drones/drones.py
```python
import math
import json
import visualisation
import dill
import time
import random
class OutOfStockError(Exception):
pass
class Position(object):
def __init__(self, x, y):
self.x = x
self.y = y
def distanceTo(self, pos):
return math.sqrt((self.x - pos.x)**2 + (self.y - pos.y)**2)
def __str__(self):
return "POS [{},{}]".format(self.x, self.y)
def __repr__(self):
return str(self)
class Drone(object):
def __init__(self, name, pos):
self.name = name
self._position = pos
def flyTo(self, pos):
distance = self.distanceTo(pos)
self._position = pos
return distance
def distanceTo(self, pos):
return math.ceil(self._position.distanceTo(pos))
@property
def position(self):
return Position(int(round(self._position.x)), int(round(self._position.y)))
class Customer(object):
def __init__(self, name, pos):
self.name = name
self.position = pos
def __str__(self):
return "CUSTOMER {}".format(self.name)
class Package(object):
def __init__(self, name):
self.name = name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return isinstance(other, type(self)) and other.name == self.name
def __str__(self):
return "PACKAGE {}".format(self.name)
def __repr__(self):
return str(self)
class Order(object):
def __init__(self, customer, packages):
self.customer = customer
self.packages = packages
def __str__(self):
return "ORDER [{} : {}]".format(self.customer, self.packages)
def __repr__(self):
return str(self)
class Warehouse(object):
def __init__(self, name, pos, packages):
self.name = name
self.position = pos
self._content = {package : packages.count(package) for package in set(packages)}
def retrieve(self, package):
try:
count = self._content[package] - 1
if count == 0:
del self._content[package]
else:
self._content[package] = count
except KeyError:
raise OutOfStockError()
return package
def __str__(self):
return "WAREHOUSE [{} : {}]".format(self.name, str(self._content))
def __repr__(self):
return str(self)
def __contains__(self, item):
return item in self._content
class Grid(object):
def __init__(self, width, height):
self.width = width
self.height = height
self._grid = [[_Cell() for i in range(self.height)] for j in range(self.width)]
self._items = {}
def placeWarehouse(self, warehouse, pos):
self._grid[pos.x][pos.y].addWarehouse(warehouse)
self._items[warehouse] = pos
def placeDrone(self, drone, pos):
self._grid[pos.x][pos.y].addDrone(drone)
self._items[drone] = pos
def placeCustomer(self, customer, pos):
self._grid[pos.x][pos.y].addCustomer(customer)
self._items[customer] = pos
def warehousesAt(self, pos):
return self._grid[pos.x][pos.y].warehouses
def dronesAt(self, pos):
return self._grid[pos.x][pos.y].drones
def customersAt(self, pos):
return self._grid[pos.x][pos.y].customers
def unplace(self, item):
pos = self._items[item]
del self._items[item]
self._grid[pos.x][pos.y].remove(item)
def display(self):
for i in range(self.height):
for j in range(self.width):
                print(self._grid[j][i], end=' ')
            print()
def __iter__(self):
for i in range(self.height):
for j in range(self.width):
yield Position(j, i)
class _Cell(object):
def __init__(self):
self.customers = []
self.warehouses = []
self.drones = []
def addCustomer(self, customer):
self.customers.append(customer)
def addWarehouse(self, warehouse):
self.warehouses.append(warehouse)
def addDrone(self, drone):
self.drones.append(drone)
def remove(self, item):
for collection in [self.customers, self.warehouses, self.drones]:
try:
collection.remove(item)
break
except ValueError:
pass
def __str__(self):
return "C{}W{}D{}".format(len(self.customers), len(self.warehouses), len(self.drones))
class Simulation(object):
def __init__(self, grid, warehouses, orders, drones, timelimit):
self.grid = grid
self.warehouses = warehouses
for warehouse in self.warehouses:
self.grid.placeWarehouse(warehouse, warehouse.position)
self.orders = _OrderManager(orders)
for order in self.orders:
if order.customer not in self.grid.customersAt(order.customer.position):
self.grid.placeCustomer(order.customer, order.customer.position)
self._drones = {drone : 0 for drone in drones}
for drone in self._drones:
self.grid.placeDrone(drone, drone.position)
self.timelimit = timelimit
@property
def drones(self):
return self._drones.keys()
@property
def cost(self):
return max(self._drones.values())
def droneCost(self, drone):
return self._drones[drone]
def flyDroneTo(self, drone, pos):
self.grid.unplace(drone)
self._drones[drone] += drone.flyTo(pos)
self.grid.placeDrone(drone, drone.position)
def warehousesContaining(self, package):
return [wh for wh in self.warehouses if package in wh]
def claimOrder(self, order):
self.orders.remove(order)
def completeOrder(self, order):
if not self.orders.hasCustomer(order.customer):
self.grid.unplace(order.customer)
def display(self):
self.grid.display()
class _OrderManager(object):
def __init__(self, orders):
self._orders = list(orders)
def remove(self, order):
self._orders.remove(order)
def hasCustomer(self, customer):
return any(order.customer == customer for order in self)
def __getitem__(self, index):
return self._orders[index]
def __len__(self):
return len(self._orders)
def __iter__(self):
for order in self._orders:
yield order
    def __bool__(self):
return len(self) > 0
def loadSimulation():
warehouses = []
with open("warehouses.json") as warehousesFile:
content = json.loads(warehousesFile.read())
for warehouseName in content:
pos = Position(*content[warehouseName]["position"])
packages = sum(([Package(packageName)] * count for packageName, count in content[warehouseName]["packages"]), [])
warehouses.append(Warehouse(warehouseName, pos, packages))
orders = []
with open("orders.json") as ordersFile:
content = json.loads(ordersFile.read())
for customerName in content:
customer = Customer(customerName, Position(*content[customerName]["position"]))
packages = [Package(packageName) for packageName in content[customerName]["packages"]]
orders.append(Order(customer, packages))
with open("settings.json") as settingsFile:
content = json.loads(settingsFile.read())
grid = Grid(content["width"], content["height"])
drones = [Drone("Drone{}".format(i), Position(0,0)) for i in range(content["drones"])]
timelimit = content["timelimit"]
return Simulation(grid, warehouses, orders, drones, timelimit)
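# Example of the JSON layout that loadSimulation() expects (file and item names
# are illustrative; the structure follows from the parsing code above):
#
# warehouses.json:
#   {"Warehouse0": {"position": [3, 4], "packages": [["PackageA", 2], ["PackageB", 1]]}}
# orders.json:
#   {"Customer0": {"position": [7, 1], "packages": ["PackageA", "PackageB"]}}
# settings.json:
#   {"width": 10, "height": 10, "drones": 2, "timelimit": 100}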
def randomSolve(simulation, visualize = lambda grid : None):
while simulation.orders:
drone = random.choice(simulation.drones)
order = random.choice(simulation.orders)
simulation.claimOrder(order)
for package in order.packages:
warehouse = random.choice(simulation.warehousesContaining(package))
simulation.flyDroneTo(drone, warehouse.position)
visualize(simulation.grid)
simulation.flyDroneTo(drone, order.customer.position)
visualize(simulation.grid)
def greedySolve(simulation, visualize = lambda grid : None):
while simulation.orders:
drone = random.choice(simulation.drones)
order = random.choice(simulation.orders)
simulation.claimOrder(order)
for package in order.packages:
warehouse = min(simulation.warehousesContaining(package), key = lambda wh : drone.distanceTo(wh.position))
simulation.flyDroneTo(drone, warehouse.position)
warehouse.retrieve(package)
visualize(simulation.grid)
simulation.flyDroneTo(drone, order.customer.position)
visualize(simulation.grid)
simulation.completeOrder(order)
if __name__ == "__main__":
simulation = loadSimulation()
simulation.display()
visualisation.visualize(simulation.grid)
greedySolve(simulation, visualize = visualisation.visualize)
print "Total cost : {}".format(simulation.cost)
```
|
{
"source": "jellebosscher/POAT",
"score": 3
}
|
#### File: jellebosscher/POAT/pos_op.py
```python
import numpy as np
from nltk.corpus import wordnet as wn
def pos_op(model, word, pos=None, depth=-1):
"""
    Builds a positive operator for a given word. Returns an NxN matrix, where N is the
    dimension of the embedding used, consisting of the sum of outer products of its
    hyponyms' vectors. Case sensitive; hyponym gathering is skipped for proper nouns.
Input: word (str)
Output: NxN matrix (np.array)
"""
n = len(next(iter(model.values())))
output_matrix = np.zeros((n, n))
if word[0].isupper(): #proper noun
return np.outer(model[word], model[word])
closure_set = get_hyponyms(word, pos, depth)
found = 0
for token in set(closure_set):
try:
vec = model[token]
output_matrix = np.add(output_matrix, np.outer(vec,vec))
found += 1
        except KeyError:
            pass
if found == 0:
print(word, " - not found", end="")
if word not in model.keys():
print("and in keys:", word in model.keys())
return None
print()
return np.outer(model[word], model[word])
return output_matrix
def get_hyponyms(word, pos=None, depth=-1):
"""
    Takes a word as input and returns the transitive hyponymy closure according to WordNet.
    Assumes first entries are the correct ones.
    Input: word (str), depth (int, -1 means no limit)
    Output: list of words [str, ..., str]
"""
hyponyms = []
hypo = lambda s: s.hyponyms()
for synset in wn.synsets(word, pos=pos):
        closure_syns = list(synset.closure(hypo, depth=depth))  # find transitive closure of the synset
closure_syns.append(synset) # include current synset
for syn in closure_syns:
for ln in syn.lemma_names():
hyponyms.append(ln.lower())
return hyponyms
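# Illustrative usage (assumes `model` is a dict mapping words to 1-D embedding
# vectors, e.g. loaded from GloVe; the words below are examples):
#   A = pos_op(model, "dog", pos=wn.NOUN)      # NxN positive semi-definite matrix
#   B = pos_op(model, "animal", pos=wn.NOUN)
# Entailment-style comparisons can then be made between the operators A and B.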
```
|
{
"source": "jellehelsen/niko_homekit",
"score": 3
}
|
#### File: niko_homekit/niko_homekit/cli.py
```python
import asyncio
import logging
import logging.config
from concurrent.futures import Future
import click
from niko_homekit import niko_homekit
# logging.config.fileConfig("logging.conf", os.environ, disable_existing_loggers=False)
LOGGER = logging.getLogger(__name__)
async def run(done):
"""main function"""
LOGGER.debug("Searching for the Niko Home Controller...")
niko = niko_homekit.find_niko()
LOGGER.debug("Controller found at %r", niko.address)
await niko.connect()
LOGGER.debug("Getting driver")
driver = await niko_homekit.get_accessory_driver(niko)
LOGGER.debug("Driver instanciated")
driver.add_job(driver._do_start) # pylint: disable=protected-access
await asyncio.wrap_future(done)
await driver.async_stop()
@click.command()
def main():
"""Console script for niko_homekit."""
try:
done = Future()
print(done)
loop = asyncio.new_event_loop()
loop.create_task(run(done))
loop.run_forever()
except KeyboardInterrupt:
done.set_result(True)
return 0
```
|
{
"source": "jellehuibregtse/peking-express",
"score": 4
}
|
#### File: jellehuibregtse/peking-express/graph.py
```python
from vertex import Vertex
class Graph:
"""
Graph data structure G = (V, E). Vertices contain the information about the edges.
"""
def __init__(self):
self.vertices = {}
self.num_vertices = 0
def add_edge(self, u, v, w):
"""
An edge going from vertex u -> v and v -> u with weight w.
Note that we assume this is an undirected graph.
:param u: vertex
:param v: vertex
:param w: weight
"""
        # We add vertex u.
        if u not in self.vertices:
            self.vertices[u] = Vertex(u)
            self.num_vertices += 1
        # We add vertex v.
        if v not in self.vertices:
            self.vertices[v] = Vertex(v)
            self.num_vertices += 1
        self.vertices[u].add_neighbour(v, w)
        self.vertices[v].add_neighbour(u, w)
def get_vertices(self):
return self.vertices
def get_vertex(self, u):
return self.vertices[u] if u in self.vertices else None
def get_critical_vertices(self):
critical_vertices = []
for u in self.vertices:
if self.vertices[u].critical:
critical_vertices += [self.vertices[u]]
return critical_vertices
def update_critical(self, u, critical=True):
vertex = self.get_vertex(u)
vertex.set_critical(critical)
def print_graph(self) -> str:
result = ''
for u in self.vertices:
result += str(u)
result += " -> "
result += " -> ".join(str(f"{v}({self.vertices[u].neighbours[v]})") for v in self.vertices[u].neighbours)
result += "\n"
return result
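# Illustrative usage (assumes the Vertex class stores neighbours as a
# {vertex: weight} dict, which print_graph relies on):
#   g = Graph()
#   g.add_edge(1, 2, 5)
#   g.add_edge(2, 3, 2)
#   g.update_critical(3)
#   print(g.print_graph())
# expected output:
#   1 -> 2(5)
#   2 -> 1(5) -> 3(2)
#   3 -> 2(2)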
```
|
{
"source": "jelleklaver/bellybutton",
"score": 3
}
|
#### File: bellybutton/bellybutton/parsing.py
```python
import re
import ast
import functools
from collections import namedtuple
import os
import yaml
from lxml.etree import XPath
from astpath.search import find_in_ast, file_contents_to_xml_ast
from bellybutton.exceptions import InvalidNode
def constructor(tag=None, pattern=None):
"""Register custom constructor with pyyaml."""
def decorator(f):
if tag is None or f is tag:
tag_ = '!{}'.format(f.__name__)
else:
tag_ = tag
yaml.add_constructor(tag_, f)
if pattern is not None:
yaml.add_implicit_resolver(tag_, re.compile(pattern))
return f
if callable(tag): # little convenience hack to avoid empty arg list
return decorator(tag)
return decorator
def _reraise_with_line_no(fn):
@functools.wraps(fn)
def wrapper(loader, node):
try:
return fn(loader, node)
except Exception as e:
msg = getattr(e, 'message', str(e))
raise InvalidNode(
"line {}: {}.".format(node.start_mark.line + 1, msg)
)
return wrapper
@constructor(pattern=r'\~\+[/\\].+')
@_reraise_with_line_no
def glob(loader, node):
"""Construct glob expressions."""
value = loader.construct_scalar(node)[len('~+/'):]
return os.path.join(
os.path.dirname(loader.name),
value
)
# todo - all exprs return (parsed_expr, contents -> {lines})?
@constructor(pattern=r'/.+')
@_reraise_with_line_no
def xpath(loader, node):
"""Construct XPath expressions."""
value = loader.construct_scalar(node)
return XPath(value)
@constructor
@_reraise_with_line_no
def regex(loader, node):
"""Construct regular expressions."""
value = loader.construct_scalar(node)
return re.compile(value, re.MULTILINE)
@constructor
@_reraise_with_line_no
def verbal(loader, node):
"""Construct verbal expressions."""
values = loader.construct_sequence(node)
pass # todo: verbal expressions
@constructor
@_reraise_with_line_no
def chain(loader, node):
"""Construct pipelines of other constructors."""
values = loader.construct_sequence(node)
pass # todo: chain constructors (viz. xpath then regex)
Settings = namedtuple('Settings', 'included excluded allow_ignore')
@constructor
def settings(loader, node):
values = loader.construct_mapping(node)
try:
return Settings(**values)
except TypeError:
for field in Settings._fields:
if field not in values:
raise InvalidNode(
"!settings node missing required field `{}`.".format(field)
)
raise
Rule = namedtuple('Rule', 'name description expr example instead settings')
def validate_syntax(rule_clause, clause_type):
try:
ast.parse(rule_clause)
except SyntaxError as e:
raise InvalidNode("Invalid syntax in `{}` clause.".format(clause_type))
def _reraise_with_rule_name(fn):
@functools.wraps(fn)
def wrapper(rule_name, *args, **kwargs):
try:
return fn(rule_name, *args, **kwargs)
except Exception as e:
msg = getattr(e, 'message', str(e))
raise InvalidNode("rule `{}`: {}".format(rule_name, msg))
return wrapper
@_reraise_with_rule_name
def parse_rule(rule_name, rule_values, default_settings=None):
rule_description = rule_values.get('description')
if rule_description is None:
raise InvalidNode("No description provided.")
rule_expr = rule_values.get('expr')
if rule_expr is None:
raise InvalidNode("No expression provided.".format(rule_name))
matches = (
lambda x: find_in_ast(
file_contents_to_xml_ast(x),
rule_expr.path,
return_lines=False
)
if isinstance(rule_expr, XPath)
else x.match
)
rule_example = rule_values.get('example')
if rule_example is not None:
validate_syntax(rule_example, clause_type='example')
if not matches(rule_example):
raise InvalidNode("`example` clause is not matched by expression.")
rule_instead = rule_values.get('instead')
if rule_instead is not None:
validate_syntax(rule_instead, clause_type='instead')
if matches(rule_instead):
raise InvalidNode("`instead` clause is matched by expression.")
rule_settings = rule_values.get('settings', default_settings)
if rule_settings is None:
raise InvalidNode("No settings or default settings specified.")
if not isinstance(rule_settings, Settings):
raise InvalidNode("Settings must be a !settings node.")
return Rule(
name=rule_name,
description=rule_description,
expr=rule_expr,
example=rule_example,
instead=rule_instead,
settings=rule_settings,
)
def load_config(fileobj):
"""Load bellybutton config file, returning a list of rules."""
loaded = yaml.load(fileobj, Loader = yaml.FullLoader)
default_settings = loaded.get('default_settings')
rules = [
parse_rule(rule_name, rule_values, default_settings)
for rule_name, rule_values in
loaded.get('rules', {}).items()
]
return rules
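# Illustrative .bellybutton.yml snippet that load_config() would accept
# (rule name and XPath are examples; the exact expression depends on
# astpath's XML layout):
#
# default_settings: !settings
#   included: ["src/"]
#   excluded: ["tests/"]
#   allow_ignore: yes
#
# rules:
#   NoPrint:
#     description: Use logging instead of print().
#     expr: //Call/func/Name[@id='print']
#     example: print('hi')
#     instead: logging.info('hi')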
```
#### File: tests/unit/test_cli.py
```python
import pytest
from bellybutton import cli
@pytest.mark.parametrize('fn', (
cli.init,
cli.lint,
))
@pytest.mark.parametrize('options', (
' --project-directory .',
' --project-directory=.',
'',
))
def test_interface_exposes_subcommands(fn, options):
"""Ensure argparse interface exposes expected subcommands."""
assert cli.PARSER.parse_args(
'{.__name__}{}'.format(fn, options).split()
).func is fn
```
|
{
"source": "JelleKUL/PythonDataAlignment",
"score": 3
}
|
#### File: PythonDataAlignment/jellepose/session.py
```python
import datetime
import json
import os
from math import sqrt
import numpy as np
import quaternion
import jellepose.params as params
import jellepose.utils as utils
from jellepose.estimation import PoseEstimation
from jellepose.geometrytransform import GeometryTransform
from jellepose.imagetransform import ImageTransform
class Session():
"""This class stores a full session, including all the images and meshes"""
sessionId = "" # the id/name of the session
dirPath = "" # the system path of session directory
globalPosition = np.array([0,0,0]) # the global position of the session origin
globalRotation = quaternion.from_float_array([0,0,0,1]) # the global rotation as a quaternion
boundingBox = [[0,0,0],[0,0,0]] # 3x2 matrix from min x to max z of all the elements in the session
imageTransforms = [] # a list of all the image transforms
geometries = [] # a list of the open3d geometries (meshes/pcd's together)
estimations: PoseEstimation = [] # a list of the estimated guasses including their confidence
fidelity = 1
recordingDate = datetime.datetime.now()
accuracy = []
def __init__(self, id = None, path= None, position= np.array([0,0,0]), rotation= quaternion.from_float_array([0,0,0,1]), images= None, meshes= None):
"""Initialise the session"""
self.sessionId = id
self.dirPath = path
self.globalPosition = position
self.globalRotation = rotation
self.imageTransforms = images
        self.meshIds = meshes
        self.estimations = []  # per-instance list, so estimations are not shared between sessions
pass
def from_dict(self, dict, path):
"""Create a session directly drom a dictionary containing all the data"""
self.sessionId = dict["sessionId"]
self.dirPath = path
self.globalPosition = utils.dict_to_np_vector3(dict["globalPosition"])
self.globalRotation = utils.dict_to_quaternion(dict["globalRotation"])
self.imageTransforms = self.get_images(dict["imageTransforms"])
self.geometries = self.get_geometries(dict["meshIds"])
return self
def from_path(self, path):
"""Create a session using the directory file path"""
sessionFile = open(os.path.join(path, params.JSON_ID),)
sessionData = json.load(sessionFile)
self.from_dict(sessionData,path)
return self
def get_images(self, imageIds):
" returns all the imageTransforms in the session"
self.imageTransforms = []
for file in os.listdir(self.dirPath):
for image in imageIds:
if file.find(image["id"]) != -1:
                    # an image file is found, check that it has a supported extension
if file.endswith(tuple(params.IMG_EXTENSION)):
newImg = ImageTransform().from_dict(image, os.path.join(self.dirPath, file))
self.imageTransforms.append(newImg)
return self.imageTransforms
def get_geometries(self, meshIds):
"returns a list of all the geometries in the session"
self.geometries = []
for file in os.listdir(self.dirPath):
for geometry in meshIds:
if file.find(geometry) != -1:
#a 3D format file is found, now check if it's a pcd or mesh
if(file.endswith(tuple(params.MESH_EXTENSION)) or file.endswith(tuple(params.PCD_EXTENSION))):
newGeometry = GeometryTransform().from_dict(geometry, os.path.join(self.dirPath, file), "mesh")
self.geometries.append(newGeometry)
return self.geometries
def get_session_3d_objects(self):
"""Returns all the meshes and image transforms as open3d object to plot"""
objects = []
for image in self.imageTransforms:
objects.append(image.get_camera_geometry(0.2))
for geometry in self.geometries:
objects.append(geometry.get_geometry())
return objects
def get_bounding_box(self):
"""returns a 2x3 numpy matrix containing the min and max values of the sessionData"""
self.boundingBox = np.concatenate((self.imageTransforms[0].pos, self.imageTransforms[0].pos),axis=0).reshape(2,3)
for trans in self.imageTransforms:
print(trans.pos)
self.boundingBox = np.concatenate((np.minimum(trans.pos, self.boundingBox[0]), np.maximum(trans.pos, self.boundingBox[1])),axis=0).reshape(2,3)
return self.boundingBox
def get_bounding_radius(self):
"""Returns a radius from the center points where all the points are in"""
radius = 0
for trans in self.imageTransforms:
distance = np.linalg.norm(trans.pos)
radius = max(radius, distance)
return radius
def get_transformation_matrix(self):
"returns the transformationmatrix of the session"
matrix = quaternion.as_rotation_matrix(np.normalized(self.globalRotation))
        matrix = np.concatenate((matrix, np.reshape(self.globalPosition, (3, 1))), axis=1)
        matrix = np.concatenate((matrix, np.array([[0, 0, 0, 1]])), axis=0)
return matrix
def get_rotation_matrix(self):
"""Returns the 3x3 rotation matrix R """
return quaternion.as_rotation_matrix(np.normalized(self.globalRotation))
def add_pose_guess(self, otherSession, R,t, matches, method = ""):
"""Add a pose guess to the session"""
globalRot = otherSession.get_rotation_matrix() @ R
globalPos = np.reshape(np.array(otherSession.globalPosition), (3,1))
trans = np.reshape(otherSession.get_rotation_matrix() @ np.reshape(t, (3,1)), (3,1))
finalPos = globalPos + trans
estimation = PoseEstimation(finalPos, globalRot, matches, method)
self.estimations.append(estimation)
def get_best_pose(self):
"""Determines the best pose based on the confidence and clustering"""
rotations = []
positions = []
weights = []
for estimation in self.estimations:
rotations.append(quaternion.from_rotation_matrix(estimation.rotation))
positions.append(estimation.position)
weights.append(estimation.get_confidence())
Q = np.array(rotations)
T = np.array(positions)
w = np.array(weights)/sum(weights)
averageRotation = utils.weighted_average_quaternions(Q,w)
averagePosition = np.average(T,axis = 0,weights = w)
return averageRotation, averagePosition
def convert_axis(self, mirrorAxis: str = "y"):
posM = np.array([1,1,1])
rotM = np.array([1,1,1,1])
if(mirrorAxis.lower() == "x"):
posM = np.array([-1,1,1])
rotM = np.array([-1,1,1,-1])
if(mirrorAxis.lower() == "y"):
posM = np.array([1,-1,1])
rotM = np.array([-1,1,-1,1])
if(mirrorAxis.lower() == "z"):
posM = np.array([1,1,-1])
rotM = np.array([1,1,-1,-1])
for image in self.imageTransforms:
image.pos *= posM
image.rot = quaternion.from_float_array(quaternion.as_float_array(image.rot) * rotM)
for geometry in self.geometries:
R = geometry.geometry.get_rotation_matrix_from_xyz((0, 0, np.pi)) #rotate to match the opencv axis of Y down
geometry.geometry.rotate(R, center=(0, 0, 0))
#TODO add translation?
def set_global_pos_rot(self,pos, rot):
"""Set the glbal position and rotation of the sesison"""
self.globalPosition = pos
self.globalRotation = rot
    def to_json(self):
"""converts this session object back to json"""
print("converting to json is not yet implemented")
return None
def benchMark_to_session(self, path):
"""converts a benchmark dataset to a session object"""
# crawl the folder to look for .camera files
self.imageTransforms = []
self.geometries = []
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(".camera"):
# a .camera file is found
fileid = file.replace(".camera", "")
#print("found camera file:",fileid)
with open(os.path.join(root, file)) as f:
lines = f.readlines()
arr = []
for line in lines:
line = " ".join(line.split())
values = line.split(' ')
for value in values:
arr.append(float(value))
# add the array values to the imageTransform
cameraMatrix = np.array([
[arr[0],arr[1],arr[2]],
[arr[3],arr[4],arr[5]],
[arr[6],arr[7],arr[8]]
])
rotationMatrix = np.array([
[arr[12],arr[13],arr[14]],
[arr[15],arr[16],arr[17]],
[arr[18],arr[19],arr[20]]
])
position = np.array([arr[21],arr[22],arr[23]])
newImg = ImageTransform(
id = fileid,
pos = position,
rot= quaternion.from_rotation_matrix(rotationMatrix),
path= os.path.join(path, "images", fileid))
newImg.cameraMatrix = cameraMatrix
self.imageTransforms.append(newImg)
elif file.endswith(".obj"):
print(file)
newGeometry = GeometryTransform().from_path(os.path.join(root, file))
self.geometries.append(newGeometry)
#print(self.imageTransforms)
return self
def sphere_intersection(center1, radius1, center2, radius2):
"""returns true if the 2 spheres are intersecting"""
    centerDistance = sqrt(pow(center1[0] - center2[0], 2) + pow(center1[1] - center2[1], 2) + pow(center1[2] - center2[2], 2))
print("centerDistance = " + str(centerDistance))
return centerDistance < (radius1 + radius2)
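# Illustrative check (the print above comes from the function itself): two unit
# spheres whose centres are 1 apart overlap, while centres 3 apart do not.
#   sphere_intersection(np.array([0, 0, 0]), 1.0, np.array([1, 0, 0]), 1.0)  # True
#   sphere_intersection(np.array([0, 0, 0]), 1.0, np.array([3, 0, 0]), 1.0)  # False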
def find_close_sessions(path: str, coordinates: np.array, maxDistance: float):
"""Finds all the close enough session from a given center point
returns: A list of Session objects tht are within the range of the reference
"""
closeEnoughSessions = []
for root, dir, files in os.walk(path, topdown=False):
for name in files:
if(name.endswith(params.JSON_ID)):
print("Found Session data:", os.path.join(root, name))
session = Session().from_path(root)
if(sphere_intersection(session.globalPosition, session.get_bounding_radius(),coordinates, maxDistance)):
#the point is close enough
print(session.sessionId, ": is close enough")
closeEnoughSessions.append(session)
else:
print(session.sessionId, ": is to far away")
print("These are all the close enough sessions", closeEnoughSessions)
return closeEnoughSessions
```
|
{
"source": "jelleman8/TractSeg",
"score": 3
}
|
#### File: tractseg/libs/data_utils.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import ndimage
import random
from tractseg.libs import img_utils
def pad_and_scale_img_to_square_img(data, target_size=144, nr_cpus=-1):
"""
Expects 3D or 4D image as input.
Does
1. Pad image with 0 to make it square
(if uneven padding -> adds one more px "behind" img; but resulting img shape will be correct)
2. Scale image to target size
"""
nr_dims = len(data.shape)
assert (nr_dims >= 3 and nr_dims <= 4), "image has to be 3D or 4D"
shape = data.shape
biggest_dim = max(shape)
# Pad to make square
if nr_dims == 4:
new_img = np.zeros((biggest_dim, biggest_dim, biggest_dim, shape[3])).astype(data.dtype)
else:
new_img = np.zeros((biggest_dim, biggest_dim, biggest_dim)).astype(data.dtype)
pad1 = (biggest_dim - shape[0]) / 2.
pad2 = (biggest_dim - shape[1]) / 2.
pad3 = (biggest_dim - shape[2]) / 2.
new_img[int(pad1):int(pad1) + shape[0],
int(pad2):int(pad2) + shape[1],
int(pad3):int(pad3) + shape[2]] = data
# Scale to right size
zoom = float(target_size) / biggest_dim
if nr_dims == 4:
#use order=0, otherwise does not work for peak images (results would be wrong)
new_img = img_utils.resize_first_three_dims(new_img, order=0, zoom=zoom, nr_cpus=nr_cpus)
else:
new_img = ndimage.zoom(new_img, zoom, order=0)
transformation = {
"original_shape": shape,
"pad_x": pad1,
"pad_y": pad2,
"pad_z": pad3,
"zoom": zoom
}
return new_img, transformation
def cut_and_scale_img_back_to_original_img(data, t, nr_cpus=-1):
"""
Undo the transformations done with pad_and_scale_img_to_square_img
Args:
data: 3D or 4D image
t: transformation dict
nr_cpus: nr of cpus to use
Returns:
3D or 4D image
"""
nr_dims = len(data.shape)
assert (nr_dims >= 3 and nr_dims <= 4), "image has to be 3D or 4D"
# Back to old size
# use order=0, otherwise image values of a DWI will be quite different after downsampling and upsampling
if nr_dims == 3:
new_data = ndimage.zoom(data, (1. / t["zoom"]), order=0)
elif nr_dims == 4:
new_data = img_utils.resize_first_three_dims(data, order=0, zoom=(1. / t["zoom"]), nr_cpus=nr_cpus)
x_residual = 0
y_residual = 0
z_residual = 0
# check if has 0.5 residual -> we have to cut 1 pixel more at the end
if t["pad_x"] - int(t["pad_x"]) == 0.5:
x_residual = 1
if t["pad_y"] - int(t["pad_y"]) == 0.5:
y_residual = 1
if t["pad_z"] - int(t["pad_z"]) == 0.5:
z_residual = 1
# Cut padding
shape = new_data.shape
new_data = new_data[int(t["pad_x"]): shape[0] - int(t["pad_x"]) - x_residual,
int(t["pad_y"]): shape[1] - int(t["pad_y"]) - y_residual,
int(t["pad_z"]): shape[2] - int(t["pad_z"]) - z_residual]
return new_data
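# Illustrative round trip (shapes are example values):
#   data = np.random.rand(90, 108, 90, 9)
#   squared, t = pad_and_scale_img_to_square_img(data, target_size=144)
#   # squared.shape == (144, 144, 144, 9)
#   restored = cut_and_scale_img_back_to_original_img(squared, t)
#   # restored.shape == (90, 108, 90, 9)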
def get_bbox_from_mask(mask, outside_value=0):
mask_voxel_coords = np.where(mask != outside_value)
minzidx = int(np.min(mask_voxel_coords[0]))
maxzidx = int(np.max(mask_voxel_coords[0])) + 1
minxidx = int(np.min(mask_voxel_coords[1]))
maxxidx = int(np.max(mask_voxel_coords[1])) + 1
minyidx = int(np.min(mask_voxel_coords[2]))
maxyidx = int(np.max(mask_voxel_coords[2])) + 1
return [[minzidx, maxzidx], [minxidx, maxxidx], [minyidx, maxyidx]]
def crop_to_bbox(image, bbox):
assert len(image.shape) == 3, "only supports 3d images"
return image[bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1], bbox[2][0]:bbox[2][1]]
def crop_to_nonzero(data, seg=None, bbox=None):
original_shape = data.shape
if bbox is None:
bbox = get_bbox_from_mask(data, 0)
cropped_data = []
for c in range(data.shape[3]):
cropped = crop_to_bbox(data[:,:,:,c], bbox)
cropped_data.append(cropped)
data = np.array(cropped_data).transpose(1,2,3,0)
if seg is not None:
cropped_seg = []
for c in range(seg.shape[3]):
cropped = crop_to_bbox(seg[:,:,:,c], bbox)
cropped_seg.append(cropped)
seg = np.array(cropped_seg).transpose(1, 2, 3, 0)
return data, seg, bbox, original_shape
def add_original_zero_padding_again(data, bbox, original_shape, nr_of_classes):
data_new = np.zeros((original_shape[0], original_shape[1], original_shape[2], nr_of_classes)).astype(data.dtype)
data_new[bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1], bbox[2][0]:bbox[2][1]] = data
return data_new
def slice_dir_to_int(slice_dir):
"""
Convert slice direction identifier to int.
Args:
slice_dir: x|y|z|xyz (string)
Returns:
0|1|2 (int)
"""
if slice_dir == "xyz":
slice_direction_int = int(round(random.uniform(0, 2)))
elif slice_dir == "x":
slice_direction_int = 0
elif slice_dir == "y":
slice_direction_int = 1
elif slice_dir == "z":
slice_direction_int = 2
else:
raise ValueError("Invalid value for 'training_slice_direction'.")
return slice_direction_int
def sample_slices(data, seg, slice_idxs, slice_direction=0, labels_type=np.int16):
if slice_direction == 0:
x = data[slice_idxs, :, :].astype(np.float32) # (bs, y, z, channels)
y = seg[slice_idxs, :, :].astype(labels_type)
# depth-channel has to be before width and height for Unet (but after batches)
x = np.array(x).transpose(0, 3, 1, 2)
# nr_classes channel has to be before with and height for DataAugmentation (bs, channels, x, y)
y = np.array(y).transpose(0, 3, 1, 2)
elif slice_direction == 1:
x = data[:, slice_idxs, :].astype(np.float32) # (x, bs, z, channels)
y = seg[:, slice_idxs, :].astype(labels_type)
x = np.array(x).transpose(1, 3, 0, 2)
y = np.array(y).transpose(1, 3, 0, 2)
elif slice_direction == 2:
x = data[:, :, slice_idxs].astype(np.float32) # (x, y, bs, channels)
y = seg[:, :, slice_idxs].astype(labels_type)
x = np.array(x).transpose(2, 3, 0, 1)
y = np.array(y).transpose(2, 3, 0, 1)
return x, y
def sample_Xslices(data, seg, slice_idxs, slice_direction=0, labels_type=np.int16, slice_window=5):
"""
Sample slices but add slices_window/2 above and below.
"""
sw = slice_window # slice_window (only odd numbers allowed)
assert sw % 2 == 1, "Slice_window has to be an odd number"
pad = int((sw - 1) / 2)
if slice_direction == 0:
y = seg[slice_idxs, :, :].astype(labels_type)
y = np.array(y).transpose(0, 3, 1, 2) # nr_classes channel has to be before with and height for DataAugmentation (bs, nr_of_classes, x, y)
elif slice_direction == 1:
y = seg[:, slice_idxs, :].astype(labels_type)
y = np.array(y).transpose(1, 3, 0, 2)
elif slice_direction == 2:
y = seg[:, :, slice_idxs].astype(labels_type)
y = np.array(y).transpose(2, 3, 0, 1)
data_pad = np.zeros((data.shape[0] + sw - 1, data.shape[1] + sw - 1, data.shape[2] + sw - 1, data.shape[3])).astype(
data.dtype)
data_pad[pad:-pad, pad:-pad, pad:-pad, :] = data # padded with two slices of zeros on all sides
batch = []
for s_idx in slice_idxs:
if slice_direction == 0:
# (s_idx+2)-2:(s_idx+2)+3 = s_idx:s_idx+5
x = data_pad[s_idx:s_idx + sw:, pad:-pad, pad:-pad, :].astype(np.float32) # (5, y, z, channels)
x = np.array(x).transpose(0, 3, 1, 2) # channels dim has to be before width and height for Unet (but after batches)
x = np.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3])) # (5*channels, y, z)
batch.append(x)
elif slice_direction == 1:
x = data_pad[pad:-pad, s_idx:s_idx + sw, pad:-pad, :].astype(np.float32) # (5, y, z, channels)
x = np.array(x).transpose(1, 3, 0, 2)
x = np.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3])) # (5*channels, y, z)
batch.append(x)
elif slice_direction == 2:
x = data_pad[pad:-pad, pad:-pad, s_idx:s_idx + sw, :].astype(np.float32) # (5, y, z, channels)
x = np.array(x).transpose(2, 3, 0, 1)
x = np.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3])) # (5*channels, y, z)
batch.append(x)
return np.array(batch), y # (bs, channels, x, y)
```
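A minimal usage sketch of the bounding-box helpers above, assuming numpy is available and the functions are imported from this module; the volume shape, channel count, and non-zero region are made up for illustration:
```python
import numpy as np

# hypothetical 4D volume (x, y, z, channels) with an all-zero border
data = np.zeros((40, 40, 40, 2), dtype=np.float32)
data[10:30, 12:28, 8:25, :] = 1.0

# crop away the zero border; bbox and original_shape allow undoing the crop later
cropped, _, bbox, original_shape = crop_to_nonzero(data)
print(cropped.shape)  # (20, 16, 17, 2)

# ... run a model on `cropped`, producing `pred` with the same spatial shape ...
pred = cropped

# pad the prediction back onto the original grid
restored = add_original_zero_padding_again(pred, bbox, original_shape, nr_of_classes=2)
assert restored.shape == data.shape
```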
|
{
"source": "jellemdekker/Robotframework-Database-Library",
"score": 3
}
|
#### File: src/DatabaseLibrary/query.py
```python
import sys
from robot.api import logger
class Query(object):
"""
Query handles all the querying done by the Database Library.
"""
def query(self, selectStatement, sansTran=False, returnAsDict=False):
"""
Uses the input `selectStatement` to query for the values that will be returned as a list of tuples. Set optional
input `sansTran` to True to run command without an explicit transaction commit or rollback.
Set optional input `returnAsDict` to True to return values as a list of dictionaries.
Tip: Unless you want to log all column values of the specified rows,
try specifying the column names in your select statements
as much as possible to prevent any unnecessary surprises with schema
changes and to easily see what your [] indexing is trying to retrieve
(i.e. instead of `"select * from my_table"`, try
`"select id, col_1, col_2 from my_table"`).
For example, given we have a table `person` with the following data:
| id | first_name | last_name |
| 1 | <NAME> | See |
When you do the following:
| @{queryResults} | Query | SELECT * FROM person |
| Log Many | @{queryResults} |
You will get the following:
[1, '<NAME>', 'See']
Also, you can do something like this:
| ${queryResults} | Query | SELECT first_name, last_name FROM person |
| Log | ${queryResults[0][1]}, ${queryResults[0][0]} |
And get the following
See, <NAME>
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| @{queryResults} | Query | SELECT * FROM person | True |
"""
cur = None
try:
cur = self._dbconnection.cursor()
logger.info('Executing : Query | %s ' % selectStatement)
self.__execute_sql(cur, selectStatement)
allRows = cur.fetchall()
if returnAsDict:
mappedRows = []
col_names = [c[0] for c in cur.description]
for rowIdx in range(len(allRows)):
d = {}
for colIdx in range(len(allRows[rowIdx])):
d[col_names[colIdx]] = allRows[rowIdx][colIdx]
mappedRows.append(d)
return mappedRows
return allRows
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def row_count(self, selectStatement, sansTran=False):
"""
Uses the input `selectStatement` to query the database and returns the number of rows from the query. Set
optional input `sansTran` to True to run command without an explicit transaction commit or rollback.
For example, given we have a table `person` with the following data:
| id | first_name | last_name |
| 1 | <NAME> | See |
| 2 | Jerry | Schneider |
When you do the following:
| ${rowCount} | Row Count | SELECT * FROM person |
| Log | ${rowCount} |
You will get the following:
2
Also, you can do something like this:
| ${rowCount} | Row Count | SELECT * FROM person WHERE id = 2 |
| Log | ${rowCount} |
And get the following
1
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| ${rowCount} | Row Count | SELECT * FROM person | True |
"""
cur = None
try:
cur = self._dbconnection.cursor()
logger.info('Executing : Row Count | %s ' % selectStatement)
self.__execute_sql(cur, selectStatement)
data = cur.fetchall()
if self.db_api_module_name in ["sqlite3", "ibm_db", "ibm_db_dbi", "pyodbc", "jaydebeapi"]:
rowCount = len(data)
else:
rowCount = cur.rowcount
return rowCount
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def description(self, selectStatement, sansTran=False):
"""
Uses the input `selectStatement` to query a table in the db which will be used to determine the description. Set
optional input `sansTran` to True to run command without an explicit transaction commit or rollback.
For example, given we have a table `person` with the following data:
| id | first_name | last_name |
| 1 | <NAME> | See |
When you do the following:
| @{queryResults} | Description | SELECT * FROM person |
| Log Many | @{queryResults} |
You will get the following:
[Column(name='id', type_code=1043, display_size=None, internal_size=255, precision=None, scale=None, null_ok=None)]
[Column(name='first_name', type_code=1043, display_size=None, internal_size=255, precision=None, scale=None, null_ok=None)]
[Column(name='last_name', type_code=1043, display_size=None, internal_size=255, precision=None, scale=None, null_ok=None)]
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| @{queryResults} | Description | SELECT * FROM person | True |
"""
cur = None
try:
cur = self._dbconnection.cursor()
logger.info('Executing : Description | %s ' % selectStatement)
self.__execute_sql(cur, selectStatement)
description = list(cur.description)
if sys.version_info[0] < 3:
for row in range(0, len(description)):
description[row] = (description[row][0].encode('utf-8'),) + description[row][1:]
return description
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def delete_all_rows_from_table(self, tableName, sansTran=False):
"""
Delete all the rows within a given table. Set optional input `sansTran` to True to run command without an
explicit transaction commit or rollback.
For example, given we have a table `person` in a database
When you do the following:
| Delete All Rows From Table | person |
If all the rows can be successfully deleted, then you will get:
| Delete All Rows From Table | person | # PASS |
If the table doesn't exist or all the data can't be deleted, then you
will get:
| Delete All Rows From Table | first_name | # FAIL |
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| Delete All Rows From Table | person | True |
"""
cur = None
selectStatement = ("DELETE FROM %s;" % tableName)
try:
cur = self._dbconnection.cursor()
logger.info('Executing : Delete All Rows From Table | %s ' % selectStatement)
result = self.__execute_sql(cur, selectStatement)
if result is not None:
if not sansTran:
self._dbconnection.commit()
return result
if not sansTran:
self._dbconnection.commit()
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def execute_sql_script(self, sqlScriptFileName, sansTran=False):
"""
Executes the content of the `sqlScriptFileName` as SQL commands. Useful for setting the database to a known
state before running your tests, or clearing out your test data after running each test. Set optional input
`sansTran` to True to run command without an explicit transaction commit or rollback.
Sample usage :
| Execute Sql Script | ${EXECDIR}${/}resources${/}DDL-setup.sql |
| Execute Sql Script | ${EXECDIR}${/}resources${/}DML-setup.sql |
| #interesting stuff here |
| Execute Sql Script | ${EXECDIR}${/}resources${/}DML-teardown.sql |
| Execute Sql Script | ${EXECDIR}${/}resources${/}DDL-teardown.sql |
SQL commands are expected to be delimited by a semi-colon (';').
For example:
DELETE FROM person_employee_table;
DELETE FROM person_table;
DELETE FROM employee_table;
Also, the last SQL command can optionally omit its trailing semi-colon.
For example:
DELETE FROM person_employee_table;
DELETE FROM person_table;
DELETE FROM employee_table
Given this, you can spread your SQL commands over several
lines.
For example:
DELETE
FROM person_employee_table;
DELETE
FROM person_table;
DELETE
FROM employee_table
However, lines that start with a number sign (`#`) are treated as a
commented line. Thus, none of the contents of that line will be executed.
For example:
# Delete the bridging table first...
DELETE
FROM person_employee_table;
# ...and then the bridged tables.
DELETE
FROM person_table;
DELETE
FROM employee_table
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| Execute Sql Script | ${EXECDIR}${/}resources${/}DDL-setup.sql | True |
"""
sqlScriptFile = open(sqlScriptFileName ,encoding='UTF-8')
cur = None
try:
cur = self._dbconnection.cursor()
logger.info('Executing : Execute SQL Script | %s ' % sqlScriptFileName)
sqlStatement = ''
for line in sqlScriptFile:
PY3K = sys.version_info >= (3, 0)
if not PY3K:
#spName = spName.encode('ascii', 'ignore')
line = line.strip().decode("utf-8")
if line.startswith('#'):
continue
elif line.startswith('--'):
continue
sqlFragments = line.split(';')
if len(sqlFragments) == 1:
sqlStatement += line + ' '
else:
for sqlFragment in sqlFragments:
sqlFragment = sqlFragment.strip()
if len(sqlFragment) == 0:
continue
sqlStatement += sqlFragment + ' '
self.__execute_sql(cur, sqlStatement)
sqlStatement = ''
sqlStatement = sqlStatement.strip()
if len(sqlStatement) != 0:
self.__execute_sql(cur, sqlStatement)
if not sansTran:
self._dbconnection.commit()
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def execute_sql_string(self, sqlString, sansTran=False):
"""
Executes the sqlString as SQL commands. Useful to pass arguments to your sql. Set optional input `sansTran` to
True to run command without an explicit transaction commit or rollback.
SQL commands are expected to be delimited by a semi-colon (';').
For example:
| Execute Sql String | DELETE FROM person_employee_table; DELETE FROM person_table |
For example with an argument:
| Execute Sql String | SELECT * FROM person WHERE first_name = ${FIRSTNAME} |
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| Execute Sql String | DELETE FROM person_employee_table; DELETE FROM person_table | True |
"""
cur = None
try:
cur = self._dbconnection.cursor()
logger.info('Executing : Execute SQL String | %s ' % sqlString)
self.__execute_sql(cur, sqlString)
if not sansTran:
self._dbconnection.commit()
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def call_stored_procedure(self, spName, spParams=None, sansTran=False):
"""
Uses the inputs of `spName` and 'spParams' to call a stored procedure. Set optional input `sansTran` to
True to run command without an explicit transaction commit or rollback.
spName should be the stored procedure name itself
spParams [Optional] should be a List of the parameters being sent in. The list can be one or multiple items.
The return from this keyword will always be a list.
Example:
| @{ParamList} = | Create List | FirstParam | SecondParam | ThirdParam |
| @{QueryResults} = | Call Stored Procedure | DBName.SchemaName.StoredProcName | List of Parameters |
Example:
| @{ParamList} = | Create List | Testing | LastName |
| Set Test Variable | ${SPName} = | DBTest.DBSchema.MyStoredProc |
| @{QueryResults} = | Call Stored Procedure | ${SPName} | ${ParamList} |
| Log List | @{QueryResults} |
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| @{QueryResults} = | Call Stored Procedure | DBName.SchemaName.StoredProcName | List of Parameters | True |
"""
if spParams is None:
spParams = []
cur = None
try:
if self.db_api_module_name in ["cx_Oracle"]:
cur = self._dbconnection.cursor()
else:
cur = self._dbconnection.cursor(as_dict=False)
PY3K = sys.version_info >= (3, 0)
if not PY3K:
spName = spName.encode('ascii', 'ignore')
logger.info('Executing : Call Stored Procedure | %s | %s ' % (spName, spParams))
cur.callproc(spName, spParams)
cur.nextset()
retVal=list()
for row in cur:
#logger.info ( ' %s ' % (row))
retVal.append(row)
if not sansTran:
self._dbconnection.commit()
return retVal
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def __execute_sql(self, cur, sqlStatement):
return cur.execute(sqlStatement)
```
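Outside Robot Framework, the keywords above can be exercised directly by wiring up the two attributes the mixin relies on (`_dbconnection` and `db_api_module_name`). A minimal sketch against an in-memory SQLite database, assuming `robotframework` is installed so `robot.api.logger` resolves; the wrapper class and sample row are hypothetical:
```python
import sqlite3
from DatabaseLibrary.query import Query

class MiniDb(Query):
    """Hypothetical stand-alone wrapper around the Query mixin."""
    def __init__(self, connection):
        self._dbconnection = connection
        self.db_api_module_name = "sqlite3"  # makes row_count fall back to len(fetchall())

db = MiniDb(sqlite3.connect(":memory:"))
db.execute_sql_string("CREATE TABLE person (id INTEGER, first_name TEXT, last_name TEXT)")
db.execute_sql_string("INSERT INTO person VALUES (1, 'Ada', 'Lovelace')")
print(db.query("SELECT first_name, last_name FROM person", returnAsDict=True))
print(db.row_count("SELECT * FROM person"))  # 1
```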
|
{
"source": "jellena/audubon-cbc",
"score": 4
}
|
#### File: audubon-cbc/scripts/download.py
```python
import gdown
import os
import pathlib
# URLs for project data.
URLS = {
"raw": "https://drive.google.com/uc?id=1vwC_m-wFaX-4brrHOrTVqVFDZDJ6y3gN",
"clean": "https://drive.google.com/uc?id=1f_qNLG_WwPAUqIeLlD4uxRK4T8oK8sx0",
}
# Project root directory.
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
# Paths to project directories.
PATHS = {
"raw": os.path.join(ROOT, "data", "Cloud_Data",
"cbc_effort_weather_1900-2018.txt"),
"clean": os.path.join(ROOT, "data", "Cloud_Data",
"1.0-rec-initial-data-cleaning.txt"),
}
def download_raw():
"""Download the raw Christmas Bird Count data."""
download(URLS["raw"], PATHS["raw"])
def download_clean():
"""Download the cleaned Christmas Bird Count data.
.. note::
For reproducibility, it might be better to clean the raw data locally
rather than downloading a cleaned version from the cloud.
"""
download(URLS["clean"], PATHS["clean"])
def download(url, path):
"""Download project data from the cloud.
Args:
url (str): URL from which to fetch data.
path (str): Destination file path; the download itself runs in that file's parent directory.
"""
cwd = os.getcwd()
os.chdir(os.path.dirname(path))
gdown.download(url)
os.chdir(cwd)
if __name__ == "__main__":
# Make the data/Cloud_Data/ directory if it doesn't exist.
datadir = pathlib.Path(os.path.dirname(PATHS["raw"]))
datadir.mkdir(parents=True, exist_ok=True)
# Download each file if it doesn't already exist.
for file in ["raw", "clean"]:
if os.path.isfile(PATHS[file]):
print(PATHS[file], "already exists.")
else:
download(URLS[file], PATHS[file])
```
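The module can also be used programmatically from another script or notebook; a short sketch (the `scripts.download` import path is an assumption and depends on how the repository sits on `sys.path`):
```python
import pathlib
from scripts.download import download_raw, PATHS  # hypothetical import path

# the target directory must exist, as in the __main__ block above
pathlib.Path(PATHS["raw"]).parent.mkdir(parents=True, exist_ok=True)
download_raw()
print("Raw data stored at:", PATHS["raw"])
```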
|
{
"source": "jelleschutter/wikidata-plain-sparql",
"score": 2
}
|
#### File: wikidata-plain-sparql/wikidata_plain_sparql/__init__.py
```python
import pandas as pd
import requests
import json
import re
import math
from bokeh.plotting import figure, ColumnDataSource, show, output_notebook
from bokeh.models import HoverTool, BoxZoomTool, WheelZoomTool
from bokeh.tile_providers import CARTODBPOSITRON, get_provider
def query(query, view=None):
data = __load_from_api(query)
if view == 'map':
return __render_map(data)
return __json_to_df(data)
def __render_map(data):
columns = data['head']['vars']
lat = []
lon = []
info = {}
for column in columns:
info['info_' + column] = []
k = 6378137
lon_ratio = k * math.pi / 180.0
lat_ratio = math.pi / 360.0
for result in data['results']['bindings']:
for column in columns:
popupText = ''
if column in result:
if 'datatype' in result[column] and result[column]['datatype'] == 'http://www.opengis.net/ont/geosparql#wktLiteral':
pointStr = result[column]['value']
coordStr = re.search(r'^Point\((.*)\)$', pointStr)
if coordStr:
coordsLatLong = coordStr[1].split(' ')
latWebMercator = math.log(math.tan((90 + float(coordsLatLong[1])) * lat_ratio)) * k
lonWebMercator = float(coordsLatLong[0]) * lon_ratio
lon.append(lonWebMercator)
lat.append(latWebMercator)
if ('info_' + column) in info:
info.pop('info_' + column, None)
else:
if result[column]['type'] == 'uri':
popupText = result[column]['value'].replace('http://www.wikidata.org/entity/', '')
else:
popupText = result[column]['value']
if 'info_' + column in info and isinstance(info['info_' + column], list):
info['info_' + column].append(popupText)
if len(lat) == 0 or len(lon) == 0:
print('Unable to render map: no results.')
return None
info['lat'] = lat
info['lon'] = lon
source = ColumnDataSource(data=info)
maxLat = max(lat)
minLat = min(lat)
rangeLat = maxLat - minLat
marginLat = rangeLat / 5
maxLon = max(lon)
minLon = min(lon)
rangeLon = maxLon - minLon
marginLon = rangeLon / 5
output_notebook()
tile_provider = get_provider(CARTODBPOSITRON)
p = figure(
x_range=(minLon - marginLon, maxLon + marginLon),
y_range=(minLat - marginLat, maxLat + marginLat),
x_axis_type='mercator',
y_axis_type='mercator',
match_aspect=True,
tools='pan,reset'
)
p.add_tile(tile_provider)
p.circle(x='lon', y='lat', size=10, fill_color='blue', fill_alpha=0.8, source=source)
tooltips = []
for column in columns:
if 'info_' + column in info:
tooltips.append((column, '@info_' + column))
# keep aspect ratio while zooming
p.add_tools(BoxZoomTool(match_aspect=True))
wheel_zoom = WheelZoomTool(zoom_on_axis=False)
p.add_tools(wheel_zoom)
p.toolbar.active_scroll = wheel_zoom
p.add_tools(HoverTool(tooltips=tooltips))
return show(p)
def __json_to_df(data):
# create empty data frame
df = pd.DataFrame(columns = data['head']['vars'])
# iterate through all results
for result in data['results']['bindings']:
# flatten result objects (result <- result.value)
mappedResult = {}
for column in result:
mappedResult[column] = result[column]['value']
# append result to data frame
df = df.append(mappedResult, ignore_index = True)
return df
def __load_from_api(query):
url = 'https://query.wikidata.org/sparql'
payload = {
'query': query
}
# add header to receive result as json
headers = {
'Accept': 'application/sparql-results+json'
}
while True:
response = requests.get(url, params = payload, headers = headers)
# check if request was successful
if response.ok:
# convert json to dict
return json.loads(response.content)
else:
# raise exception in case of http error
response.raise_for_status()
break
raise Exception
```
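A usage sketch of the `query` function above, as it might be run in a Jupyter notebook; the SPARQL queries and the entity/property IDs in them (P31 = instance of, Q146 = house cat, Q33506 = museum, P625 = coordinate location) are given for illustration:
```python
import wikidata_plain_sparql as wikidata

# default: results come back as a pandas DataFrame
cats = wikidata.query('''
    SELECT ?item ?itemLabel WHERE {
        ?item wdt:P31 wd:Q146.
        SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
    } LIMIT 10
''')
print(cats.head())

# view='map' plots any wktLiteral coordinates on a Bokeh map instead
wikidata.query('''
    SELECT ?item ?coords WHERE {
        ?item wdt:P31 wd:Q33506.
        ?item wdt:P625 ?coords.
    } LIMIT 50
''', view='map')
```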
|
{
"source": "JelleStiesri/IPASS",
"score": 3
}
|
#### File: IPASS/Source/application.py
```python
from tkinter import messagebox
from Source import plotGraph, addressToText, calculateRoute, createGraph, createMapsUrl
import tkinter as tk
from tkinter import *
import webbrowser
proportion = [900, 500] # Window dimensions of the application in pixels
AddressFile = "addresses.txt" # File in which the addresses are stored
def storeGUI():
"""Deze functie word aangeroepen om de applicatie te starten. In het eerste scherm word gevraagd vanuit welke
winkel er bezorgd gaat worden (begin/eindbestemming). Hierna word er doorgeklikt naar volgende pagina's"""
Master = Tk()
Master.resizable(False, False) # window proportions stay fixed
Master.title('Winkels')
f = Frame(Master, bg='#edf1f9', width=proportion[0], height=proportion[1])
f.grid(row=0, column=0, sticky="NW")
f.grid_propagate(0)
f.update()
photo = PhotoImage(file="utrechtmap.png") # Achtergrondfoto
w = Label(Master, image=photo)
w.photo = photo
w.place(x=450, y=150, anchor="center")
Label(Master, bg="#E5EBF8", fg="#06445C", font=("Times", 18), text="Winkels", borderwidth=2, relief="groove",
width=13, height=2).place(x=155, y=55, anchor="center")
def checkIfMenuIsFilled(Event):
"""Deze functie checkt of er een winkelkeuze is gemaakt en geeft vervolgens de knop vrij."""
x = v.get()
if x != 0:
proceedButton.config(state='normal')
else:
pass
def endMenu():
"""Deze functie word aangeroepen om naar het volgende scherm te gaan, hij delete het menu scherm."""
Master.destroy()
addressesGUI(v.get() - 1)
"""Gui code"""
Label(Master, bg="#c0c3ce", fg="#06445C", font=("Times", 13), text="Selecteer hier het vertrekpunt", borderwidth=2,
relief="groove", width=30, height=2).place(x=450, y=55,
anchor="center")
v = tk.IntVar(Master)
R1 = Radiobutton(Master, bg="#F2D6DC", fg="#06445C", font=("Times", 10), text='<NAME>', borderwidth=2,
relief="groove", width=23, height=2, variable=v, value=1)
R1.place(x=450, y=125, anchor="center")
R2 = Radiobutton(Master, bg="#F2D6DC", fg="#06445C", font=("Times", 10), text='<NAME>',
borderwidth=2, relief="groove", width=23, height=2, variable=v, value=2)
R2.place(x=450, y=175, anchor="center")
R3 = Radiobutton(Master, bg="#F2D6DC", fg="#06445C", font=("Times", 10), text='<NAME>', borderwidth=2,
relief="groove", width=23, height=2, variable=v, value=3)
R3.place(x=450, y=225, anchor="center")
proceedButton = tk.Button(Master, bg="#c0c3ce", fg="#06445C", font=("Times", 13), text='Volgende', borderwidth=2,
relief="groove", width=12, height=3, command=endMenu)
proceedButton.place(x=450, y=390, anchor="center")
proceedButton.config(state='disabled')
Master.bind("<Enter>", checkIfMenuIsFilled) # Checkt of er iets is ingevuld
mainloop()
def addressesGUI(storeNumber):
"""Onder deze functie zitten alle functionaliteiten verzameld voor het invoeren en valideren
van adressen. De gebruiken kan de adresinformatie invoeren, dit word goed/afgekeurd en zal vervolgens
in het bestand 'addresses.txt' worden gezet."""
Master = Tk()
Master.resizable(False, False)
Master.title('Adressen invoeren')
f = Frame(Master, bg='#edf1f9', width=proportion[0], height=proportion[1])
f.grid(row=0, column=0, sticky="NW")
f.grid_propagate(0)
f.update()
Label(Master, bg="#E5EBF8", fg="#06445C", font=("Times", 18), text="Adressen invoeren", borderwidth=2,
relief="groove", width=20, height=2).place(x=155, y=55, anchor="center")
landNummer = "NL"
addressList = [] # used to check how many addresses were entered
weightList = []
def checkIfAdress(Event):
"""Deze functie activeert de knoppen 'next' en 'invoer' wanneer er aan de eisen is voldaan."""
x = street.get()
y = place.get()
z = number.get()
w = weight.get()
if len(addressList) >= 2: # the user can continue once at least 2 addresses have been entered
nextButtonEco.config(state='normal')
nextButtonShortest.config(state='normal')
if x and y and z and w: # input can be submitted once every field is filled in
inputButton.config(state='normal')
else:
pass
def errorPopup(message):
"""Deze functie laat een error zien bij een foute invoer."""
messagebox.showerror("Foute invoer", message)
def addAdress():
"""Deze functie maakt het mogelijk om (bezorg)adressen in te voeren
& deze voert ze meteen in in een textbestand (addresses.txt)"""
streetStrip = street.get().replace(" ", "+")
if streetStrip[len(streetStrip) - 1] == "+":
streetStrip = streetStrip[:-1]
placeStrip = place.get().replace(" ", "+")
if placeStrip[len(placeStrip) - 1] == "+":
placeStrip = placeStrip[:-1]
address = {'string': "{}+{}+{}+{}\n".format(number.get(), streetStrip,
placeStrip, landNummer)
, 'lenStreet': len(street.get().split()), 'lenPlace': len(place.get().split())}
"""Adressen Checkers"""
if street.get() == "": # Geen straatnaam ingevoerd
errorPopup("Voer een straatnaam in")
elif number.get() == "": # Geen huisnummer ingevoerd
errorPopup("Voer een huisnummer in")
elif place.get() == "": # Geen woontplaats ingevoerd
errorPopup("Voer een woonplaats in")
elif weight.get() == "": # Geen gewicht ingevoerd
errorPopup("Voer een geldig gewicht in")
elif address['string'] in addressList: # Dubbele invoer
errorPopup("Dit adres is al ingevoerd")
elif len(address['string']) > 50: # Adres te lang
errorPopup("Dit adres is te lang")
elif len(addressList) > 8: # Max 9 adressen (want anders te weinig memory)
errorPopup("Er mogen maximaal 9 bezorgadressen worden ingevoerd")
elif not number.get().isdigit(): # Er word geen cijfer ingevoerd
errorPopup("Voer cijfers in als huisnummer:")
elif not weight.get().isdigit():
errorPopup("Voer cijfers in als gewicht")
else:
addressList.append(address['string'])
with open("addresses.txt", "a") as addressFile:
addressFile.write(address['string']) # appends the address to the file
addressFile.close()
waypointLabel = Label(Master, bg="#edf1f9", fg="#06445C", font=("Times", 12),
text=addressToText.adressToText(address))
waypointLabel.place(x=22, y=(200 + (26 * len(addressList))), anchor="w")
weightList.append(int(weight.get()))
street.set("")
number.set("")
place.set("")
weight.set("")
inputButton.config(state='disabled')
def endAdressesShortest():
"""Deze functie word aangeroepen als er op 'next' is gedrukt. Deze berekent de korste route"""
addressToText.adressToText(storeAddress)
Master.destroy()
routeGUI(weightList, False)
def endAdressesEco():
"""Deze functie word aangeroepen als er op 'next' is gedrukt. Deze berekent de zuinigste route"""
addressToText.adressToText(storeAddress)
Master.destroy()
routeGUI(weightList, True)
store = (open("Stores.txt").readlines())[storeNumber]
with open("addressesString.txt", "w+") as adressStringFile: # Maakt het bestand opnieuw (leeg) aan
adressStringFile.close()
with open("addresses.txt", "w+") as adressFile: # Maakt het bestand opnieuw (leeg) aan
adressFile.write(store)
adressFile.close()
"""Gui code"""
storeAddress = {'string': store, 'lenStreet': 1, 'lenPlace': 1}
Label(Master, bg="#c0c3ce", fg="#06445C", font=("Times", 13), text="Ingevoerde adressen:",
borderwidth=2, relief="groove", width=30, height=2).place(x=152, y=155, anchor="center")
winkelLabel = Label(Master, bg="#edf1f9", fg="#06445C", font=("Times", 12),
text=addressToText.adressToText(storeAddress) + " [Winkel]")
winkelLabel.place(x=22, y=200, anchor="w")
Label(Master, bg="#c0c3ce", fg="#06445C", font=("Times", 13), text="Voer hier de bezorgadressen in",
borderwidth=2, relief="groove", width=30, height=2).place(x=570, y=65, anchor="center")
street = StringVar()
Label(Master, bg="#edf1f9", fg="#06445C", font=("Times", 12), text='Straatnaam').place(x=430, y=120, anchor="w")
Entry(Master, textvariable=street, width=30, borderwidth=2, relief="groove", ).place(x=522, y=120, anchor="w")
number = StringVar()
Label(Master, bg="#edf1f9", fg="#06445C", font=("Times", 12), text='Huisnummer').place(x=430, y=170, anchor="w")
Entry(Master, textvariable=number, width=6, borderwidth=2, relief="groove", ).place(x=522, y=170, anchor="w")
place = StringVar()
Label(Master, bg="#edf1f9", fg="#06445C", font=("Times", 12), text='Plaats').place(x=430, y=220, anchor="w")
Entry(Master, textvariable=place, width=25, borderwidth=2, relief="groove", ).place(x=522, y=220, anchor="w")
weight = StringVar()
Label(Master, bg="#edf1f9", fg="#06445C", font=("Times", 12), text='Gewicht [Kg]').place(x=430, y=270, anchor="w")
Entry(Master, textvariable=weight, width=6, borderwidth=2, relief="groove", ).place(x=522, y=270, anchor="w")
inputButton = tk.Button(Master, bg="#F2D6DC", fg="#06445C", font=("Times", 13), text='Invoeren',
borderwidth=2, relief="groove", width=10, height=2, command=addAdress)
inputButton.place(x=430, y=325, anchor="w")
inputButton.config(state='disabled')
Label(Master, bg="#c0c3ce", fg="#06445C", font=("Times", 13), text="Route genereren",
borderwidth=2, relief="groove", width=30, height=2).place(x=570, y=400, anchor="center")
nextButtonShortest = tk.Button(Master, bg="#F2D6DC", fg="#06445C", font=("Times", 13), text='Kortste route',
borderwidth=2, relief="groove", width=13, height=2, command=endAdressesShortest)
# this button selects the shortest route
nextButtonShortest.place(x=432, y=450, anchor="w")
nextButtonShortest.config(state='disabled')
nextButtonEco = tk.Button(Master, bg="#F2D6DC", fg="#06445C", font=("Times", 13), text='Zuinigste route',
borderwidth=2, relief="groove", width=13, height=2, command=endAdressesEco)
# this button selects the most fuel-efficient route
nextButtonEco.place(x=582, y=450, anchor="w")
nextButtonEco.config(state='disabled')
Master.bind("<Enter>", checkIfAdress)
def routeGUI(weights, isEco):
"""Binnen deze functie word de route gemaakt doormiddel van het algoritme 'calculateRoute'. Deze functie
word aangeroepen na 'addressesGUI' waarin alle adressen al zijn ingevoergd. De gebruiker kan zien welke route
er is berekend, dus ook op welke volgorde er bezorgd gaat worden. Ook staat heir een linkje naar google maps
waarin de route al is ingeladen."""
Master = Tk()
Master.resizable(False, False)
Master.title('Route')
f = Frame(Master, bg='#edf1f9', width=proportion[0], height=proportion[1])
f.grid(row=0, column=0, sticky="NW")
f.grid_propagate(0)
f.update()
def openURL():
webbrowser.open(createMapsUrl.createMapsURL(route), new=1)
def endRouteGui():
"""Deze functie word aangeroepen als er op 'Reset' is gedrukt. Deze start de applicatie opnieuw"""
Master.destroy()
storeGUI()
"""Maakt de graaf en runt het algoritme"""
matrix = createGraph.main(AddressFile)
routeData = calculateRoute.main(matrix, weights, isEco) # 'isEco' is de keuze tussen zuinig / kort
route = routeData['route']
plotGraph.visualizeGraph(route) # Plot de graaf en maakt er een PNG van
"""Gui code"""
photo = PhotoImage(file="graphplot.png") # Foto van route
load = Label(Master, image=photo)
load.photo = photo
load.place(x=580, y=265, anchor="center")
Label(Master, bg="#E5EBF8", fg="#06445C", font=("Times", 18), text="Route", borderwidth=2, relief="groove",
width=13, height=2).place(x=125, y=55, anchor="center")
Label(Master, bg="#c0c3ce", fg="#06445C", font=("Times", 13), text="Berekende route", borderwidth=2,
relief="groove", width=30, height=2).place(x=580, y=55, anchor="center")
"""Print de route op volgorde met een letter ervoor"""
routeString = ""
alphabetCount = ""
for point in range(len(route)):
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if point != len(route) - 1:
alphabetCount += alphabet[route[point]] + ":\n"
else:
alphabetCount += alphabet[0] + ":\n"
pointAddress = (open("addressesString.txt").readlines())[route[point]].split('\n')[0]
routeString += (pointAddress.split("\n")[0] + "\n")
"""Gui code"""
alphabetLabel = Label(Master, bg="#edf1f9", fg="#06445C", font=("Times", 12), text=alphabetCount)
alphabetLabel.place(x=40, y=180, anchor="w")
addressLabel = Label(Master, bg="#edf1f9", fg="#06445C", font=("Times", 12), text=routeString)
addressLabel.place(x=60, y=180, anchor="w")
fuelLabel = Label(Master, bg="#edf1f9", fg="#06445C", font=("Times", 12),
text="Verbruik: "+str(round(calculateRoute.fuelUsage(route, weights, matrix), 2))+"L")
fuelLabel.place(x=55, y=400, anchor="w")
routeButton = Button(Master, bg="#F2D6DC", fg="#06445C", font=("Times", 13), text="Routebeschrijving",
borderwidth=2, relief="groove", width=15, height=3, command=openURL)
routeButton.place(x=125, y=450, anchor="center")
resetButton = Button(Master, bg="#F2D6DC", fg="#06445C", font=("Times", 13), text="Reset",
borderwidth=2, relief="groove", width=10, height=1, command=endRouteGui)
resetButton.place(x=770, y=480, anchor="center")
storeGUI()
```
#### File: IPASS/Source/calculateRoute.py
```python
def travellingSalesmanProblem(data, countFuel):
"""Return de route (lijst met vertex) + afstand"""
graph = data['graph']
weight = data['weight']
store = data['store']
"""Invoer vallideren"""
if len(graph) < 2:
raise Exception("Graaf te klein, kleiner dan 2 heeft geen zin")
if len(graph) != len(graph[0]):
raise Exception("Matrix moet vierkant zijn")
if store != 0:
raise Exception("Winkel moet op node 0 zijn")
if len(graph) > 15:
raise Exception("Graaf te groot, Eventuele memory problemen")
print("Invoer gevallideert")
best_route = {'route': [store]} # the route always starts at the store
vertex = set(range(1, len(graph))) # all vertices in a set (without 0, because that is the store)
currentVertex = store
while True:
cost, nextStep = calculateNextStep(currentVertex, vertex, graph, weight, countFuel)
if nextStep == 0: # calculateNextStep returns 0 once the route has been computed
break
best_route['route'].append(nextStep)
currentVertex = nextStep
vertex -= {nextStep}
best_route['route'].append(0) # the last point on the route is always the store
best_route['route'] = tuple(reversed(best_route['route']))
# the route is computed backwards, so it is reversed here to get the right order
best_route['cost'] = 0
for number in range(len(best_route['route']) - 1):
best_route['cost'] += distance(best_route['route'][number], best_route['route'][number + 1], graph, weight,
False)
# computes the length of the route in metres, without the weight influencing the distance
return best_route
def calculateNextStep(start, vertexSet, graph, weight, weightCounts):
"""Deze functie returnt de route van een begin (mag alles zijn) tot de eind vortex (altijd 0!)
+ de afgelegde afstand"""
if vertexSet: # when vertexSet is non-empty
return min((distance(place, start, graph, weight, weightCounts) + calculateNextStep(place, vertexSet - {place},
graph, weight, weightCounts)[0], place) for place in vertexSet)
# g(i,s) = min{C(i,k) + g(k, s-{k})} -- s = set of vertices, i = start vertex
else:
return graph[0][start], 0 # last step = vertex -> start point
def distance(begin, destination, graph, weight, weightCounts):
"""Deze functie berekent de afstand tussen 2 vertex.Wanneer weightCounts True is
houd hij de Heuristiek ook rekening met het gewicht van de paketten om zo brandstof te besparen"""
if weightCounts:
totalWeigt = sum(weight)
packageWeight = weight[destination - 1]
return ((((abs(((totalWeigt - packageWeight) - totalWeigt) / totalWeigt) * 100) * 4) + 100) / 100) * \
graph[begin][destination] # for every % of the total weight -> +4% distance
else: # compute the distance without taking fuel consumption into account
return graph[begin][destination]
def fuelUsage(route, weight, graph):
"""Deze functie berekend het brandstofverbruik van de wagen. Het gemiddelde gebruik van een bakwagen (kleine
vrachtwagen) is 1:6. Dit word beinvloed door het gewicht van de lading aan boord (6.7% per 100kg)"""
usage = 0 # Het totale verbruik in Liter Diesel\
totalWeight = sum(weight)
for point in range(len(route) - 1):
dist = graph[route[point]][route[point + 1]]
multyplier = (((totalWeight / 100) * 6.7) + 100) / 100 # 6.7% extra consumption per 100 kg
tempUsage = (dist / 6000) * multyplier # base consumption = 1:6 (6 km per litre)
usage += tempUsage
if route[point + 1] == 0: # the store has no delivery weight
pass
else:
totalWeight -= weight[route[point + 1] - 1]
return usage
def main(matrix, weight, countFuel):
"""Deze functie stuurt het hele TSP aan en returnt de uiteindelijke route"""
routeData = travellingSalesmanProblem({'graph': matrix, 'weight': weight, 'store': 0}, countFuel)
data = {'route': routeData['route'], 'cost': routeData['cost'], 'usage': fuelUsage(routeData['route'], weight, matrix)}
return data
```
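A minimal sketch of calling `main` directly, using the same toy graph as the unit test further down: vertex 0 is the store and the two delivery stops carry 500 kg and 20 kg:
```python
matrix = [[0, 2, 3],
          [5, 0, 7],
          [3, 2, 0]]
weights = [500, 20]

shortest = main(matrix, weights, countFuel=False)  # route (0, 2, 1, 0), cost 10
eco = main(matrix, weights, countFuel=True)        # route (0, 1, 2, 0): drop the 500 kg package first
print(shortest['route'], shortest['cost'], round(shortest['usage'], 3))
print(eco['route'], eco['cost'], round(eco['usage'], 3))
```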
#### File: IPASS/Source/createMapsUrl.py
```python
def createMapsURL(route):
"""Deze functie maakt een URL waarmee de gebruiker meteen de route in google maps kan bekijken,
Voorbeeld: https://www.google.com/maps/dir/?api=1&origin=27+Julianalaan+Bilthoven,+NL&destination=27+Julianalaan+
Bilthoven,+NL&travelmode=driving&waypoints=10+Acacialaan+Bilthoven+NL%7C4+Gruttolaan+Maartensdijk+NL"""
baseURL = 'https://www.google.com/maps/dir/?api=1&origin=' # part of the URL that is always the same
waypoints = ''
for point in route:
if point == 0: # the store is not a waypoint
storeAddress = (open("addresses.txt").readlines())[point].split('\n')[0]
else:
pointAddress = (open("addresses.txt").readlines())[point]
waypoints += (pointAddress.split()[0] + '%7C') # %7C separates the addresses
mapsURL = baseURL + storeAddress + '&destination=' + storeAddress + '&travelmode=driving&waypoints=' + waypoints
return mapsURL
```
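A short sketch of generating the link on its own, assuming `addresses.txt` has already been filled by the GUI (store on the first line); the route tuple is illustrative:
```python
route = (0, 2, 1, 0)  # store -> stop 2 -> stop 1 -> store
url = createMapsURL(route)
print(url)  # paste into a browser to open the route in Google Maps
```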
#### File: IPASS/Source/plotGraph.py
```python
import networkx as nx
import matplotlib.pyplot as plt
def visualizeGraph(route):
"""visualizeGraph is een functie die een png van de gekozen route (rode pijlen) maakt"""
graph = nx.DiGraph() # Maakt graph aan
length = len(route) - 1
def getAlphabet(number):
"""Deze functie vertaalt een getal naar een letter (nodig voor networkx)"""
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
return alphabet[number]
for vertex in range(length): # every combination of vertices has to be added
for secVertex in range(length):
if secVertex != vertex:
graph.add_edges_from([(getAlphabet(vertex), getAlphabet(secVertex))])
storeColor = {'A': 'yellow'} # store = yellow
values = [storeColor.get(node, 'green') for node in graph.nodes()] # the rest is green
redArrows = []
for point in range(len(route) - 1):
redArrows.append((getAlphabet(route[point]), getAlphabet(route[point + 1])))
# adds all route points to the list of red arrows
blackEdges = [edge for edge in graph.edges() if edge not in redArrows]
# stores every edge that is not red as black
position = nx.spring_layout(graph)
nx.draw_networkx_nodes(graph, position, cmap=plt.get_cmap('jet'),
node_color=values, node_size=500)
nx.draw_networkx_labels(graph, position)
nx.draw_networkx_edges(graph, position, edgelist=redArrows, edge_color='r', arrows=True, arrowsize=20)
nx.draw_networkx_edges(graph, position, edgelist=blackEdges, arrows=False)
plt.savefig('graphplot.png') # saves the figure as a PNG
plt.show()
```
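Called on its own, the plotter only needs a route sequence; vertex 0 (node 'A', drawn in yellow) is the store:
```python
# draw the route A -> C -> B -> A and write graphplot.png
visualizeGraph((0, 2, 1, 0))
```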
#### File: Source/Tests/testCalculateRoute.py
```python
import unittest
from Source import calculateRoute
class testTSP(unittest.TestCase):
def testDistance(self):
graph = [[0, 10, 15, 20],
[5, 0, 9, 10],
[6, 13, 0, 12],
[8, 8, 9, 0]]
weight = [10, 50, 40]
self.assertEqual(calculateRoute.distance(2, 1, graph, weight, False), 13)
self.assertEqual(calculateRoute.distance(0, 0, graph, weight, False), 0)
self.assertEqual(calculateRoute.distance(0, 1, graph, weight, True), 14)
"""Met gewicht:
Basisafstand = 10 - Gewicht plek 1 = 10 - Totaalgewicht = 100 - 100/10 = 10%
10% x 4 = 40% - 140% van 10 = 14"""
def testTravellingSalesmanProblem(self):
dataSet1 = {'graph': [[0, 2, 3],
[5, 0, 7],
[3, 2, 0]],
'weight': [500, 20],
'store': 0}
print(calculateRoute.travellingSalesmanProblem(dataSet1, True))
self.assertEqual(calculateRoute.travellingSalesmanProblem(dataSet1, False)['cost'], 10)
self.assertEqual(calculateRoute.travellingSalesmanProblem(dataSet1, False)['route'], (0, 2, 1, 0))
self.assertEqual(calculateRoute.travellingSalesmanProblem(dataSet1, True)['route'], (0, 1, 2, 0))
"""Wanneer we rekening houden met het gewicht is het voordeliger om eerst het paket van
500kg te bezorger."""
if __name__ == '__main__':
unittest.main()
```
#### File: Source/Tests/testCreateGraph.py
```python
import unittest
from Source import createGraph
class testCreateGraph(unittest.TestCase):
def testCreateMatrix(self):
data = createGraph.createData("testAddresses.txt")
matrix = createGraph.createMatrix(data)
self.assertEqual(len(matrix), 7)
self.assertEqual(len(matrix[0]), 7) # test whether the graph is square
self.assertEqual(matrix[0][0], 0)
self.assertEqual(matrix[1][1], 0)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "Jelle-S/tilde",
"score": 3
}
|
#### File: tilde/bin/logfocus.py
```python
from datetime import datetime
from time import sleep
from AppKit import NSWorkspace
#
# configuration:
# - sleep_time Amount of seconds to sleep before rechecking app change.
#
conf = {
'sleep_time' : 1,
}
def print_app_data(app):
print('%s: %s [%s]' % (
datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
app['NSApplicationName'],
app['NSApplicationPath']
))
def track_app_focus_change(sleep_time):
last_active_name = None
while True:
active_app = NSWorkspace.sharedWorkspace().activeApplication()
if active_app['NSApplicationName'] != last_active_name:
last_active_name = active_app['NSApplicationName']
print_app_data(active_app)
sleep(sleep_time)
def main(conf):
try:
track_app_focus_change(conf['sleep_time'])
except KeyboardInterrupt:
print("\nDone.\n")
if __name__ == '__main__':
main(conf)
```
|
{
"source": "jellestoffels/UVA_AML20",
"score": 3
}
|
#### File: UVA_AML20/week_3/dataset_utils.py
```python
import os
import struct
import numpy as np
try:
import pickle
except ImportError:
import cPickle as pickle
def load_mnist(dataset = "training", path = "."):
"""
Python function for importing the MNIST data set. It returns an iterator
of 2-tuples with the first element being the label and the second element
being a numpy.uint8 2D array of pixel data for the given image.
"""
if dataset is "training":
fname_img = os.path.join(path, 'train-images-idx3-ubyte')
fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')
elif dataset is "testing":
fname_img = os.path.join(path, 't10k-images-idx3-ubyte')
fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')
else:
raise ValueError("dataset must be 'testing' or 'training'")
# Load everything in some numpy arrays
with open(fname_lbl, 'rb') as flbl:
magic, num = struct.unpack(">II", flbl.read(8))
lbl = np.fromfile(flbl, dtype=np.int8)
with open(fname_img, 'rb') as fimg:
magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), cols, rows)
get_img = lambda idx: (lbl[idx], img[idx])
# Create an iterator which returns each image in turn
for i in range(len(lbl)):
yield get_img(i)
def load_cifar(file):
with open(file, 'rb') as fo:
try:
dict = pickle.load(fo, encoding='bytes')
except TypeError:
dict = pickle.load(fo)
return dict
```
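A usage sketch for both loaders; the data paths are assumptions (the MNIST idx files and a CIFAR-10 python batch must already be on disk):
```python
# iterate over the first few MNIST training examples
for i, (label, pixels) in enumerate(load_mnist("training", path="./data/mnist")):
    print(label, pixels.shape)  # e.g. 5 (28, 28)
    if i == 2:
        break

# load one pickled CIFAR batch and inspect its keys
batch = load_cifar("./data/cifar-10-batches-py/data_batch_1")
print(batch.keys())
```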
|
{
"source": "jelletreep/parcels",
"score": 2
}
|
#### File: parcels/parcels/plotting.py
```python
from datetime import datetime
from datetime import timedelta as delta
import numpy as np
from parcels.field import Field
from parcels.field import VectorField
from parcels.grid import CurvilinearGrid
from parcels.grid import GridCode
from parcels.tools.error import TimeExtrapolationError
from parcels.tools.loggers import logger
def plotparticles(particles, with_particles=True, show_time=None, field=None, domain=None, projection=None,
land=True, vmin=None, vmax=None, savefile=None, animation=False, **kwargs):
"""Function to plot a Parcels ParticleSet
:param show_time: Time at which to show the ParticleSet
:param with_particles: Boolean whether particles are also plotted on Field
:param field: Field to plot under particles (either None, a Field object, or 'vector')
:param domain: dictionary (with keys 'N', 'S', 'E', 'W') defining domain to show
:param projection: type of cartopy projection to use (default PlateCarree)
:param land: Boolean whether to show land. This is ignored for flat meshes
:param vmin: minimum colour scale (only in single-plot mode)
:param vmax: maximum colour scale (only in single-plot mode)
:param savefile: Name of a file to save the plot to
:param animation: Boolean whether result is a single plot, or an animation
"""
show_time = particles[0].time if show_time is None else show_time
if isinstance(show_time, datetime):
show_time = np.datetime64(show_time)
if isinstance(show_time, np.datetime64):
if not particles.time_origin:
raise NotImplementedError(
'If fieldset.time_origin is not a date, showtime cannot be a date in particleset.show()')
show_time = particles.time_origin.reltime(show_time)
if isinstance(show_time, delta):
show_time = show_time.total_seconds()
if np.isnan(show_time):
show_time, _ = particles.fieldset.gridset.dimrange('time_full')
if field is None:
spherical = True if particles.fieldset.U.grid.mesh == 'spherical' else False
plt, fig, ax, cartopy = create_parcelsfig_axis(spherical, land, projection)
if plt is None:
return # creating axes was not possible
ax.set_title('Particles' + parsetimestr(particles.fieldset.U.grid.time_origin, show_time))
latN, latS, lonE, lonW = parsedomain(domain, particles.fieldset.U)
if cartopy is None or projection is None:
if domain is not None:
if isinstance(particles.fieldset.U.grid, CurvilinearGrid):
ax.set_xlim(particles.fieldset.U.grid.lon[latS, lonW], particles.fieldset.U.grid.lon[latN, lonE])
ax.set_ylim(particles.fieldset.U.grid.lat[latS, lonW], particles.fieldset.U.grid.lat[latN, lonE])
else:
ax.set_xlim(particles.fieldset.U.grid.lon[lonW], particles.fieldset.U.grid.lon[lonE])
ax.set_ylim(particles.fieldset.U.grid.lat[latS], particles.fieldset.U.grid.lat[latN])
else:
ax.set_xlim(np.nanmin(particles.fieldset.U.grid.lon), np.nanmax(particles.fieldset.U.grid.lon))
ax.set_ylim(np.nanmin(particles.fieldset.U.grid.lat), np.nanmax(particles.fieldset.U.grid.lat))
elif domain is not None:
if isinstance(particles.fieldset.U.grid, CurvilinearGrid):
ax.set_extent([particles.fieldset.U.grid.lon[latS, lonW], particles.fieldset.U.grid.lon[latN, lonE],
particles.fieldset.U.grid.lat[latS, lonW], particles.fieldset.U.grid.lat[latN, lonE]])
else:
ax.set_extent([particles.fieldset.U.grid.lon[lonW], particles.fieldset.U.grid.lon[lonE],
particles.fieldset.U.grid.lat[latS], particles.fieldset.U.grid.lat[latN]])
else:
if field == 'vector':
field = particles.fieldset.UV
elif not isinstance(field, Field):
field = getattr(particles.fieldset, field)
depth_level = kwargs.pop('depth_level', 0)
plt, fig, ax, cartopy = plotfield(field=field, animation=animation, show_time=show_time, domain=domain,
projection=projection, land=land, vmin=vmin, vmax=vmax, savefile=None,
titlestr='Particles and ', depth_level=depth_level)
if plt is None:
return # creating axes was not possible
if with_particles:
plon = np.array([p.lon for p in particles])
plat = np.array([p.lat for p in particles])
if cartopy:
ax.scatter(plon, plat, s=20, color='black', zorder=20, transform=cartopy.crs.PlateCarree())
else:
ax.scatter(plon, plat, s=20, color='black', zorder=20)
if animation:
plt.draw()
plt.pause(0.0001)
elif savefile is None:
plt.show()
else:
plt.savefig(savefile)
logger.info('Plot saved to ' + savefile + '.png')
plt.close()
def plotfield(field, show_time=None, domain=None, depth_level=0, projection=None, land=True,
vmin=None, vmax=None, savefile=None, **kwargs):
"""Function to plot a Parcels Field
:param show_time: Time at which to show the Field
:param domain: dictionary (with keys 'N', 'S', 'E', 'W') defining domain to show
:param depth_level: depth level to be plotted (default 0)
:param projection: type of cartopy projection to use (default PlateCarree)
:param land: Boolean whether to show land. This is ignored for flat meshes
:param vmin: minimum colour scale (only in single-plot mode)
:param vmax: maximum colour scale (only in single-plot mode)
:param savefile: Name of a file to save the plot to
:param animation: Boolean whether result is a single plot, or an animation
"""
if type(field) is VectorField:
spherical = True if field.U.grid.mesh == 'spherical' else False
field = [field.U, field.V]
plottype = 'vector'
elif type(field) is Field:
spherical = True if field.grid.mesh == 'spherical' else False
field = [field]
plottype = 'scalar'
else:
raise RuntimeError('field needs to be a Field or VectorField object')
plt, fig, ax, cartopy = create_parcelsfig_axis(spherical, land, projection=projection)
if plt is None:
return None, None, None, None # creating axes was not possible
data = {}
plotlon = {}
plotlat = {}
for i, fld in enumerate(field):
show_time = fld.grid.time[0] if show_time is None else show_time
if fld.grid.defer_load:
fld.fieldset.computeTimeChunk(show_time, 1)
(idx, periods) = fld.time_index(show_time)
show_time -= periods * (fld.grid.time_full[-1] - fld.grid.time_full[0])
if show_time > fld.grid.time[-1] or show_time < fld.grid.time[0]:
raise TimeExtrapolationError(show_time, field=fld, msg='show_time')
latN, latS, lonE, lonW = parsedomain(domain, fld)
if isinstance(fld.grid, CurvilinearGrid):
plotlon[i] = fld.grid.lon[latS:latN, lonW:lonE]
plotlat[i] = fld.grid.lat[latS:latN, lonW:lonE]
else:
plotlon[i] = fld.grid.lon[lonW:lonE]
plotlat[i] = fld.grid.lat[latS:latN]
if i > 0 and not np.allclose(plotlon[i], plotlon[0]):
raise RuntimeError('VectorField needs to be on an A-grid for plotting')
if fld.grid.time.size > 1:
if fld.grid.zdim > 1:
data[i] = np.squeeze(fld.temporal_interpolate_fullfield(idx, show_time))[depth_level, latS:latN, lonW:lonE]
else:
data[i] = np.squeeze(fld.temporal_interpolate_fullfield(idx, show_time))[latS:latN, lonW:lonE]
else:
if fld.grid.zdim > 1:
data[i] = np.squeeze(fld.data)[depth_level, latS:latN, lonW:lonE]
else:
data[i] = np.squeeze(fld.data)[latS:latN, lonW:lonE]
if plottype == 'vector':
if field[0].interp_method == 'cgrid_velocity':
logger.warning_once('Plotting a C-grid velocity field is achieved via an A-grid projection, reducing the plot accuracy')
d = np.empty_like(data[0])
d[:-1, :] = (data[0][:-1, :] + data[0][1:, :]) / 2.
d[-1, :] = data[0][-1, :]
data[0] = d
d = np.empty_like(data[0])
d[:, :-1] = (data[0][:, :-1] + data[0][:, 1:]) / 2.
d[:, -1] = data[0][:, -1]
data[1] = d
spd = data[0] ** 2 + data[1] ** 2
speed = np.where(spd > 0, np.sqrt(spd), 0)
vmin = speed.min() if vmin is None else vmin
vmax = speed.max() if vmax is None else vmax
if isinstance(field[0].grid, CurvilinearGrid):
x, y = plotlon[0], plotlat[0]
else:
x, y = np.meshgrid(plotlon[0], plotlat[0])
u = np.where(speed > 0., data[0]/speed, 0)
v = np.where(speed > 0., data[1]/speed, 0)
if cartopy:
cs = ax.quiver(x, y, u, v, speed, cmap=plt.cm.gist_ncar, clim=[vmin, vmax], scale=50, transform=cartopy.crs.PlateCarree())
else:
cs = ax.quiver(x, y, u, v, speed, cmap=plt.cm.gist_ncar, clim=[vmin, vmax], scale=50)
else:
vmin = data[0].min() if vmin is None else vmin
vmax = data[0].max() if vmax is None else vmax
assert len(data[0].shape) == 2
if field[0].interp_method == 'cgrid_tracer':
d = data[0][1:, 1:]
elif field[0].interp_method == 'cgrid_velocity':
if field[0].fieldtype == 'U':
d = np.empty_like(data[0])
d[:-1, :-1] = (data[0][1:, :-1] + data[0][1:, 1:]) / 2.
elif field[0].fieldtype == 'V':
d = np.empty_like(data[0])
d[:-1, :-1] = (data[0][:-1, 1:] + data[0][1:, 1:]) / 2.
else: # W
d = data[0][1:, 1:]
else: # if A-grid
d = (data[0][:-1, :-1] + data[0][1:, :-1] + data[0][:-1, 1:] + data[0][1:, 1:])/4.
d = np.where(data[0][:-1, :-1] == 0, 0, d)
d = np.where(data[0][1:, :-1] == 0, 0, d)
d = np.where(data[0][1:, 1:] == 0, 0, d)
d = np.where(data[0][:-1, 1:] == 0, 0, d)
if cartopy:
cs = ax.pcolormesh(plotlon[0], plotlat[0], d, transform=cartopy.crs.PlateCarree())
else:
cs = ax.pcolormesh(plotlon[0], plotlat[0], d)
if cartopy is None:
ax.set_xlim(np.nanmin(plotlon[0]), np.nanmax(plotlon[0]))
ax.set_ylim(np.nanmin(plotlat[0]), np.nanmax(plotlat[0]))
elif domain is not None:
ax.set_extent([np.nanmin(plotlon[0]), np.nanmax(plotlon[0]), np.nanmin(plotlat[0]), np.nanmax(plotlat[0])], crs=cartopy.crs.PlateCarree())
cs.cmap.set_over('k')
cs.cmap.set_under('w')
cs.set_clim(vmin, vmax)
cartopy_colorbar(cs, plt, fig, ax)
timestr = parsetimestr(field[0].grid.time_origin, show_time)
titlestr = kwargs.pop('titlestr', '')
if field[0].grid.zdim > 1:
if field[0].grid.gtype in [GridCode.CurvilinearZGrid, GridCode.RectilinearZGrid]:
gphrase = 'depth'
depth_or_level = field[0].grid.depth[depth_level]
else:
gphrase = 'level'
depth_or_level = depth_level
depthstr = ' at %s %g ' % (gphrase, depth_or_level)
else:
depthstr = ''
if plottype == 'vector':
ax.set_title(titlestr + 'Velocity field' + depthstr + timestr)
else:
ax.set_title(titlestr + field[0].name + depthstr + timestr)
if not spherical:
ax.set_xlabel('Zonal distance [m]')
ax.set_ylabel('Meridional distance [m]')
plt.draw()
if savefile:
plt.savefig(savefile)
logger.info('Plot saved to ' + savefile + '.png')
plt.close()
return plt, fig, ax, cartopy
def create_parcelsfig_axis(spherical, land=True, projection=None, central_longitude=0):
try:
import matplotlib.pyplot as plt
except:
logger.info("Visualisation is not possible. Matplotlib not found.")
return None, None, None, None # creating axes was not possible
if projection is not None and not spherical:
raise RuntimeError('projection not accepted when Field doesn''t have geographic coordinates')
if spherical:
try:
import cartopy
except:
logger.info("Visualisation of field with geographic coordinates is not possible. Cartopy not found.")
return None, None, None, None # creating axes was not possible
projection = cartopy.crs.PlateCarree(central_longitude) if projection is None else projection
fig, ax = plt.subplots(1, 1, subplot_kw={'projection': projection})
try: # gridlines not supported for all projections
gl = ax.gridlines(crs=projection, draw_labels=True)
gl.xlabels_top, gl.ylabels_right = (False, False)
gl.xformatter = cartopy.mpl.gridliner.LONGITUDE_FORMATTER
gl.yformatter = cartopy.mpl.gridliner.LATITUDE_FORMATTER
except:
pass
if land:
ax.coastlines()
else:
cartopy = None
fig, ax = plt.subplots(1, 1)
ax.grid()
return plt, fig, ax, cartopy
def parsedomain(domain, field):
field.grid.check_zonal_periodic()
if domain is not None:
if not isinstance(domain, dict) and len(domain) == 4: # for backward compatibility with <v2.0.0
domain = {'N': domain[0], 'S': domain[1], 'E': domain[2], 'W': domain[3]}
_, _, _, lonW, latS, _ = field.search_indices(domain['W'], domain['S'], 0, 0, 0, search2D=True)
_, _, _, lonE, latN, _ = field.search_indices(domain['E'], domain['N'], 0, 0, 0, search2D=True)
return latN+1, latS, lonE+1, lonW
else:
if field.grid.gtype in [GridCode.RectilinearSGrid, GridCode.CurvilinearSGrid]:
return field.grid.lon.shape[0], 0, field.grid.lon.shape[1], 0
else:
return len(field.grid.lat), 0, len(field.grid.lon), 0
def parsetimestr(time_origin, show_time):
if time_origin.calendar is None:
return ' after ' + str(delta(seconds=show_time)) + ' hours'
else:
date_str = str(time_origin.fulltime(show_time))
return ' on ' + date_str[:10] + ' ' + date_str[11:19]
def cartopy_colorbar(cs, plt, fig, ax):
cbar_ax = fig.add_axes([0, 0, 0.1, 0.1])
fig.subplots_adjust(hspace=0, wspace=0, top=0.925, left=0.1)
plt.colorbar(cs, cax=cbar_ax)
def resize_colorbar(event):
plt.draw()
posn = ax.get_position()
cbar_ax.set_position([posn.x0 + posn.width + 0.01, posn.y0, 0.04, posn.height])
fig.canvas.mpl_connect('resize_event', resize_colorbar)
resize_colorbar(None)
```
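A hedged sketch of calling these helpers directly; `fieldset` and `pset` are assumed to be an existing parcels FieldSet and ParticleSet built elsewhere, and the domain bounds and colour limits are illustrative:
```python
from parcels.plotting import plotfield, plotparticles

# plot the zonal velocity on a sub-domain and save it to disk
plotfield(fieldset.U, domain={'N': 60, 'S': 40, 'W': -30, 'E': 0},
          vmin=0, vmax=1.5, savefile='u_field')

# overlay the particle positions on the interpolated velocity vectors
plotparticles(pset, field='vector', land=True, savefile='particles_on_uv')
```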
|
{
"source": "JellevanCappelle/OrganisedAssembly",
"score": 2
}
|
#### File: OrganisedAssembly/Parser/language.py
```python
from arpeggio import Optional, ZeroOrMore, OneOrMore, Sequence, Not, EOF
from arpeggio import RegExMatch as regex
from arpeggio import ParserPython
from arpeggio import NonTerminal
from arpeggio.export import PTDOTExporter
import sys
import json
# load instructions and other keywords
with open("keywords/keywords.txt", "r") as db:
keywords = db.read().split('\n')
with open("keywords/instructions.txt", "r") as db:
instructionKeyword = db.read().split('\n')
instructionKeyword = [i for i in instructionKeyword if not i == "callf" and not i.startswith("rep")]
with open("keywords/sse instructions.txt", "r") as sse:
sseInstructionKeyword = sse.read().split('\n')
# register types
GPRs = []
for l in "abcd":
GPRs += [l + 'l', l + 'h', l + 'x', f"e{l}x", f"r{l}x"]
for r in ["di", "si", "sp", "bp"]:
GPRs += [r, 'e' + r, 'r' + r]
for i in range(8, 16):
GPRs.append(f"r{i}")
GPRs.append(f"r{i}d")
GPRs.append(f"r{i}w")
SRs = [l + 's' for l in "cdefg"]
CRs = ["cr" + str(i) for i in range(8)]
SSERs = ["xmm" + str(i) for i in range(16)]
conditionSuffixes = ["e", "z", "l", "g", "a", "b", "le", "ge", "be", "ae", "c", "s", "p", "o"]
ifKeywords = []
forKeywords = []
whileKeywords = []
for suf in conditionSuffixes:
ifKeywords += ["if" + suf, "ifn" + suf]
forKeywords += ["for" + suf, "forn" + suf]
whileKeywords += ["while" + suf, "whilen" + suf]
instructionKeyword += ["set" + suf, "setn" + suf]
sizeKeywords = ["byte", "word", "dword", "qword"]
stringSizeKeywords = [kwd + 's' for kwd in sizeKeywords]
miscKeywords = ["constant", "string", "cstring", "function", "method", "using", "namespace", "ref", "enum", "struct", "sizeof", "alias", "binary", "ref", "value", "params"]
nonIdentifiers = instructionKeyword + ifKeywords + forKeywords + whileKeywords + sizeKeywords + stringSizeKeywords + miscKeywords
# define language rules
ARRAY_TYPE = "Array"
# registers
def gpRegister(): return GPRs
def segRegister(): return SRs
def controlRegister(): return CRs
def register(): return [gpRegister, segRegister, controlRegister]
def sseRegister(): return SSERs
# numbers
def decimal(): return regex('[0-9]+d?')
def hexadecimal(): return [regex('0x[0-9a-fA-F]+'), regex('[0-9][0-9a-fA-F]*h')]
def binary(): return [regex('0b[01]+'), regex('[01]+b')]
def singleQuotedString(): return regex(r"'([^'\\]|\\.)*'")
def doubleQuotedString(): return regex(r'"([^"\\]|\\.)*"')
def number(): return [hexadecimal, binary, decimal, singleQuotedString]
# identifier
def name(): return Not(nonIdentifiers), regex('[a-zA-Z_][a-zA-Z0-9_]*')
def namePath(): return name, ZeroOrMore('.', name)
def templateParam(): return sizeOrType() # anything that is a sizeOrType can also be a template parameter, but more options might be added later
def templateParamList(): return '<', templateParam, ZeroOrMore(',', templateParam), '>'
def templateDeclParamList(): return '<', name, ZeroOrMore(',', name), '>' # TODO: allow syntax for specifying parameter types
def templateName(): return name, Optional(templateDeclParamList)
def identifier(): return name, Optional(templateParamList)
def identifierPath(): return identifier, ZeroOrMore('.', identifier)
# expression # TODO: differentiate between expressions that can or can't contain registers (i.e. effective addresses or immediates)
def sizeof(): return "sizeof", '<', templateParam, '>'
def exprValue(): return [number, register, sizeof, identifierPath]
def binaryOperator(): return ['+', '-', '*', '/', '^', '&', '|', '<<', '>>']
def unaryOperator(): return ['-', '~']
def exprTerm(): return Optional(unaryOperator), [('(', expr, ')'), exprValue]
def expr(): return exprTerm, ZeroOrMore(binaryOperator, exprTerm)
# operands
def baseOrOffset(): return [gpRegister, expr]
def offsetMultiplier(): return baseOrOffset, Optional('*', expr) # TODO: redo this (and expressions in general) properly, taking precedence rules into account
def baseOffsetMultiplier(): return baseOrOffset, Optional(['+', '-'], offsetMultiplier)
def address(): return baseOffsetMultiplier # TODO: fix baseOffsetMultiplier
def segAddress(): return Optional(segRegister, ':'), address
def sizeSpecifier(): return sizeKeywords
def memReference(): return Optional(sizeSpecifier), '[', segAddress, ']'
def immediate(): return Optional(sizeSpecifier), expr # can also be an aliased register!
def operand(): return [register, memReference, aliasDecl, immediate]
def operandList(): return operand, ZeroOrMore(',', operand)
# sse instructions
def sseMemReference(): return '[', segAddress, ']'
def sseOperand(): return [sseRegister, sseMemReference]
def sseOpcode(): return sseInstructionKeyword
def sseInstruction(): return sseOpcode, sseOperand, ZeroOrMore(',', sseOperand)
# statements
def opcode(): return instructionKeyword
def repPrefix(): return ["rep", "repe", "repne", "repnz", "repz"]
def lockPrefix(): return "lock"
def instruction(): return Optional(lockPrefix), Optional(repPrefix), opcode, Optional(operandList)
def label(): return name, ':'
def comment(): return regex('#.*')
def statement(): return Optional(label), Optional([instruction, sseInstruction, controlFlow, abiReturn, abiCall, methodCall, declaration]), Optional(comment)
def emptyStatement(): return Optional(comment)
# variables and constants
def refType(): return "ref", '<', sizeOrType, '>'
def valueType(): return "value", '<', sizeOrType, '>'
def sizeOrType(): return [sizeSpecifier, identifierPath, refType, valueType]
def exprList(): return expr, ZeroOrMore(',', expr)
def varAssignment(): return '=', expr
def variableDecl(): return sizeOrType, '[', name, ']', Optional(varAssignment)
def dataStringType(): return stringSizeKeywords
def dataStringDecl(): return dataStringType, '[', name, ']', '=', exprList
def textStringDecl(): return "string", '[', name, ']', '=', doubleQuotedString
def cStringDecl(): return "cstring", '[', name, ']', '=', singleQuotedString
def fileDecl(): return "binary", '[', name, ']', ':', doubleQuotedString
def constantDecl(): return "constant", name, '=', expr
def arrayDecl(): return "byte", '<', expr, '>', '[', name, ']'
def aliasDecl(): return "alias", name, '=', gpRegister
def declaration(): return [variableDecl, dataStringDecl, textStringDecl, cStringDecl, constantDecl, arrayDecl, aliasDecl, fileDecl]
# enums
def enumAssignment(): return name, '=', expr
def enumStatement(): return Optional(enumAssignment), Optional(comment)
def enumBody(): return '{', enumStatement, ZeroOrMore('\n', enumStatement), '}'
def enum(): return "enum", name, Optional(emptySpace), enumBody, Optional(comment)
# ABI calls
def memArgument(): return Optional(sizeOrType), '[', segAddress, ']'
def immArgument(): return Optional(sizeSpecifier), expr
def refArgument(): return "ref", [('[', segAddress, ']'), singleQuotedString, doubleQuotedString]
def argument(): return [gpRegister, memArgument, refArgument, immArgument]
def returnTarget(): return [gpRegister, memArgument, aliasDecl, name] # only aliased registers should be allowed for 'name'
def returnTargetList(): return returnTarget, ZeroOrMore(',', returnTarget)
def argumentList(): return argument, ZeroOrMore(',', argument)
def abiAssignment(): return returnTargetList, '='
def abiCall(): return Optional(abiAssignment), identifierPath, '(', Optional(argumentList), ')' # TODO: allow registers and memory operands to serve as function pointers
def abiReturn(): return "return", Optional(argumentList)
# functions
def paramsKeyword(): return "params"
parameterType = [sizeOrType, (paramsKeyword, ARRAY_TYPE, '<', sizeOrType, '>')]
def parameter(): return parameterType, '[', name, ']'
def parameterList(): return parameter, ZeroOrMore(',', parameter)
def functionDeclaration(): return "function", templateName, '(', Optional(parameterList), ')'
def emptySpace(): return OneOrMore(emptyStatement, '\n')
def localCode(): return statement, ZeroOrMore('\n', statement)
def localBody(): return '{', localCode, '}'
def function(): return functionDeclaration, Optional(emptySpace), localBody, Optional(comment)
# structs
def structVariableDecl(): return sizeOrType, '[', name, ']'
def structField(): return [structVariableDecl, arrayDecl]
def staticKeyword(): return "static"
def structMethodDecl(): return Optional(staticKeyword), "method", templateName, '(', Optional(parameterList), ')'
def structMethod(): return structMethodDecl, Optional(emptySpace), localBody, Optional(comment)
def structStatement(): return Optional([constantDecl, structField, structMethod]), Optional(comment)
def structBody(): return '{', structStatement, ZeroOrMore('\n', structStatement), '}'
def struct(): return "struct", templateName, Optional(emptySpace), structBody, Optional(comment)
def regPointerCast(): return '(', gpRegister, "as", identifierPath, ')'
def memPointerCast(): return '[', segAddress, "as", identifierPath, ']'
def directPointer(): return '[', identifierPath, ']'
def structReference(): return [directPointer, regPointerCast, memPointerCast]
def methodCall(): return Optional(abiAssignment), structReference, '.', identifierPath, '(', Optional(argumentList), ')'
# control flow
def oneliner(): return [instruction, controlFlow, abiReturn, abiCall]
def initialiser(): return [instruction, abiCall, declaration]
def condition(): return [instruction, abiCall]
def repeatable(): return [instruction, controlFlow, abiCall]
def ifKeyword(): return ifKeywords
def ifBody(): return [localBody, oneliner]
def ifStatement(): return ifKeyword, '(', Optional(condition), ')', Optional(emptySpace), ifBody, Optional(comment), Optional('\n', elseStatement)
def elseStatement(): return "else", Optional(emptySpace), ifBody, Optional(comment)
def whileKeyword(): return whileKeywords
def loopBody(): return [localBody, repeatable]
def whileLoop(): return whileKeyword, '(', Optional(condition), ')', Optional(emptySpace), loopBody, Optional(comment)
def doWhileLoop(): return "do", Optional(emptySpace), loopBody, Optional(emptySpace), whileKeyword, '(', Optional(condition), ')', Optional(comment)
def forKeyword(): return forKeywords
def forLoop(): return forKeyword, '(', Optional(initialiser), ';', Optional(condition), ';', Optional(repeatable), ')', Optional(emptySpace), loopBody, Optional(comment)
def doForLoop(): return "do", Optional(emptySpace), loopBody, Optional(emptySpace), forKeyword, '(', Optional(initialiser), ';', Optional(condition), ';', Optional(repeatable), ')', Optional(comment)
def breakStatement(): return "break"
def continueStatement(): return "continue"
def controlFlow(): return [breakStatement, continueStatement, ifStatement, whileLoop, doWhileLoop, forLoop, doForLoop]
# namespaces
def globalStatement(): return [namespace, enum, struct, function, statement, '']
def globalCode(): return globalStatement, ZeroOrMore('\n', globalStatement)
def namespaceDeclaration(): return 'namespace', namePath
def namespaceBody(): return '{', globalCode, '}'
def namespace(): return namespaceDeclaration, Optional(emptySpace), namespaceBody, Optional(comment)
# file structure
def using(): return "using", namePath, Optional(comment), '\n'
def usingList(): return using, ZeroOrMore(Optional(emptySpace), using)
def program(): return Optional(emptySpace), Optional(usingList), globalCode, EOF
parser = ParserPython(program, ws = "\t\r ", autokwd = True, reduce_tree = False, memoization = True)
with open(input()) as file:
code = file.read()
try:
result = parser.parse(code)
except Exception as e:
print(e, file=sys.stderr)
sys.exit()
def semiSerialise(nonTerminal):
rule = nonTerminal.rule_name
children = []
for i in range(len(nonTerminal)):
child = nonTerminal[i]
if isinstance(child, NonTerminal):
child = semiSerialise(child)
else:
if child.rule_name != "":
child = (child.rule_name, [str(child)], parser.pos_to_linecol(child.position))
else:
child = str(child)
children.append(child)
position = parser.pos_to_linecol(nonTerminal.position)
return (rule, children, position)
def escape(string):
chars = '\\"\t\n'
replacements = '\\"tn'
for i, c in enumerate(chars):
string = string.replace(c, f"\\{replacements[i]}")
return f'"{string}"'
def serialise(tree):
if isinstance(tree, tuple):
rule = tree[0]
children = list(map(serialise, tree[1]))
line, column = tree[2]
position = f"{line},{column}"
return f'{{"{rule}":[{",".join(children + [position])}]}}'
else:
return escape(tree)
tree = semiSerialise(result)
tree_json = serialise(tree)
print(tree_json)
```
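The file above follows arpeggio's convention of defining every grammar rule as a plain Python function and then walking the resulting parse tree. A small self-contained sketch of that pattern (a toy grammar, not part of OrganisedAssembly), showing how `ParserPython`, node iteration and `pos_to_linecol()` fit together:
```python
from arpeggio import ParserPython, ZeroOrMore, EOF, NonTerminal
from arpeggio import RegExMatch as regex

# Toy grammar: a '+'-separated list of integers.
def number(): return regex(r'[0-9]+')
def addition(): return number, ZeroOrMore('+', number)
def start(): return addition, EOF

toy_parser = ParserPython(start)
toy_tree = toy_parser.parse("1+2+34")

def walk(node, depth=0):
    # Print rule names with their source positions, similar in spirit to
    # semiSerialise() above but without the JSON serialisation step.
    print("  " * depth, node.rule_name or repr(str(node)),
          toy_parser.pos_to_linecol(node.position))
    if isinstance(node, NonTerminal):
        for child in node:
            walk(child, depth + 1)

walk(toy_tree)
```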
|
{
"source": "jellevandehaterd/ansible-role-cyberarkpasswordvault-lookup",
"score": 3
}
|
#### File: ansible-role-cyberarkpasswordvault-lookup/filter_plugins/filters.py
```python
def format_list(list_, pattern):
    """Apply a printf-style pattern to every item of the list."""
    return [pattern % s for s in list_]
def remove_prefix_list(list_, pattern):
    """Keep only the items that start with the prefix, with the prefix stripped."""
    result = []
    for line in list_:
        if line.startswith(pattern):
            result.append(line[len(pattern):])
    return result
def remove_prefix(text, prefix):
    """Strip the prefix from text if present; otherwise return text unchanged."""
    if text.startswith(prefix):
        return text[len(prefix):]
    return text
class FilterModule(object):
def filters(self):
return {
'format_list': format_list,
'remove_prefix_list': remove_prefix_list,
'remove_prefix': remove_prefix,
}
```
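A quick sanity check of the filters above, assuming the plugin file is importable as `filters.py` (inside Ansible they would instead be invoked from Jinja2 templates, e.g. `{{ names | format_list('user_%s') }}`):
```python
from filters import FilterModule  # assumes the plugin file above is on the import path

filters = FilterModule().filters()
print(filters['format_list'](['a', 'b'], 'item_%s'))        # ['item_a', 'item_b']
print(filters['remove_prefix_list'](['db_x', 'y'], 'db_'))  # ['x']
print(filters['remove_prefix']('db_users', 'db_'))          # 'users'
```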
|
{
"source": "jellevandehaterd/mypackage",
"score": 3
}
|
#### File: mypackage/demo/demo.py
```python
import json
import logging
from os.path import sep
from pathlib import Path
logger = logging.getLogger(__name__)
class DemoInputFileDoesNotExistException(Exception):
message: str
def __init__(self, file_name: str):
"""Initialize the exception with the file_name of the missing file"""
self.message = f"Input file '{file_name}' does not exist"
class DemoOutputFileDoesNotExistException(Exception):
message: str
def __init__(self, file_name: str):
"""Initialize the exception with the file_name of the missing file"""
self.message = f"Output file '{file_name}' does not exist"
class Demo:
"""
A class that represents a Demo.
"""
input_path: Path
output_path: Path
def __init__(self, input_path: str, output_path: str = None) -> None:
"""
Initialize a demo
:param input_path: input file path
:param output_path: output file path
"""
self.input_path = Path(input_path).absolute()
if not self.input_path.is_file():
logger.error(f"Input file '{input_path}' does not exist")
raise DemoInputFileDoesNotExistException(input_path)
if output_path is None:
input_dir: Path = self.input_path.parents[0]
output_path = f"{input_dir}{sep}output.json"
Path(output_path).touch()
self.output_path = Path(output_path).absolute()
if not self.output_path.is_file():
logger.error(f"Output file '{output_path}' does not exist")
raise DemoOutputFileDoesNotExistException(output_path)
```
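A minimal usage sketch of the `Demo` class above (same import path the tests below use; the temporary files are placeholders). When no `output_path` is given, the constructor creates `output.json` next to the input file:
```python
import tempfile
from pathlib import Path
from mypackage.demo.demo import Demo  # assumes the package above is installed

with tempfile.TemporaryDirectory() as tmp:
    input_file = Path(tmp) / "input.txt"
    input_file.touch()
    demo = Demo(str(input_file))   # output_path defaults to <input dir>/output.json
    print(demo.output_path.name)   # -> output.json
```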
#### File: mypackage/mypackage/mypackage.py
```python
import logging
from pathlib import Path
import click
from mypackage.demo.demo import Demo
logger = logging.getLogger(__name__)
DEFAULT_VERBOSITY = 0
def configure_logging(verbosity: int, quiet: int, json: bool = False) -> None:
"""Set the log level according to a level of verbosity and level of quietness"""
# Base log level is WARNING, subtract the verbosity and add the quietness
log_level = logging.WARNING - (verbosity * 10) + (quiet * 10)
logger.debug(
f"Setting log_level to {log_level} using q '{quiet}' and v '{verbosity}'"
)
# If JSON output is requested
log_fmt: str = logging.BASIC_FORMAT
if json:
log_fmt = (
'{"message": "%(message)s", "level": "%(levelname)s", "name": "%(name)s", '
'"asctime": "%(asctime)s"}'
)
logging.basicConfig(format=log_fmt, level=log_level)
#########
# Menus #
#########
logo: str = r"""
______ ______ _
| ___ \ (_____ \ | |
| | _ | |_ _ _____) )___ ____| | _ ____ ____ ____
| || || | | | | ____/ _ |/ ___) | / ) _ |/ _ |/ _ )
| || || | |_| | | ( ( | ( (___| |< ( ( | ( ( | ( (/ /
|_||_||_|\__ |_| \_||_|\____)_| \_)_||_|\_|| |\____)
(____/ (_____|
"""
@click.group()
@click.version_option(
prog_name="MyPackage", message=f"{logo}\n%(prog)s, version %(version)s"
)
def main() -> None:
"""
Welcome to the MyPackage CLI
"""
pass
@main.group()
def demo() -> None:
"""
Commands related to the demo.
"""
pass
@demo.command()
@click.argument("input-dir", nargs=1, required=True, type=click.Path())
@click.argument("output-dir", nargs=1, required=False, type=click.Path())
@click.option(
"-v", count=True, help="Increase logging verbosity", default=DEFAULT_VERBOSITY,
)
@click.option("-q", count=True, help="Decrease logging verbosity", default=0)
@click.option(
"-o-json",
is_flag=True,
type=bool,
help="Format the output logs as JSON",
default=False,
)
def test(input_dir: str, output_dir: str, v: int, q: int, o_json: bool) -> None:
configure_logging(v, q, o_json)
try:
Demo(input_dir, output_dir)
exit(0)
except Exception as error:
logger.critical(error)
exit(1)
if __name__ == "__main__":
main()
```
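The CLI above can be exercised in-process with click's test runner; a hedged sketch (the module path is a guess based on the file layout, and the file names are placeholders):
```python
from click.testing import CliRunner
from mypackage.mypackage import main  # assumed module path; adjust to the actual package layout

runner = CliRunner()
with runner.isolated_filesystem():
    open("input.txt", "w").close()   # Demo requires an existing input file
    result = runner.invoke(main, ["demo", "test", "input.txt", "-v"])
    print(result.exit_code)          # 0 on success, 1 if Demo raised
```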
#### File: tests/demo/demo_test.py
```python
import os
from os.path import sep
from pathlib import Path
import pytest
from _pytest.logging import LogCaptureFixture
from mypackage.demo.demo import Demo
from mypackage.demo.demo import DemoInputFileDoesNotExistException
from mypackage.demo.demo import DemoOutputFileDoesNotExistException
class TestDemo:
@pytest.mark.parametrize("input_path", ["input.txt"])
def test__init__throws_exception_if_input_path_does_not_exist(
self, tmp_path: Path, input_path: str
) -> None:
with pytest.raises(DemoInputFileDoesNotExistException) as context:
Demo(f"{tmp_path}{sep}{input_path}")
assert (
f"Input file '{tmp_path}{sep}{input_path}' does not exist"
== context.value.message
)
@pytest.mark.parametrize("input_path, output_path", [("input.txt", "output.json")])
def test__init__throws_exception_if_output_path_does_not_exist(
self, tmp_path: Path, input_path: str, output_path: str
) -> None:
with pytest.raises(DemoOutputFileDoesNotExistException) as context:
Path(f"{tmp_path}{sep}{input_path}").touch()
os.chdir(str(tmp_path))
Demo(f"{tmp_path}{sep}{input_path}", f"{tmp_path}{sep}{output_path}")
assert (
f"Output file '{tmp_path}{sep}{output_path}' does not exist"
== context.value.message
)
@pytest.mark.parametrize("input_path", ["input.txt"])
def test__init__sets_input_path_properly(
self, tmp_path: Path, input_path: str
) -> None:
Path(f"{tmp_path}{sep}{input_path}").touch()
os.chdir(str(tmp_path))
demo = Demo(f"{tmp_path}{sep}{input_path}")
assert f"{tmp_path}{sep}{input_path}" == str(demo.input_path)
```
|
{
"source": "Jell-E/videocaptionbot",
"score": 3
}
|
#### File: Jell-E/videocaptionbot/captionbot.py
```python
import sys
import telepot
from telepot.delegate import per_chat_id, create_open
class MessageCounter(telepot.helper.ChatHandler):
    """Per-chat handler: remembers the last video received and, when the next
    text message arrives, re-sends that video with the text as its caption."""
    def __init__(self, seed_tuple, timeout):
        super(MessageCounter, self).__init__(seed_tuple, timeout)
        self._video = 0
def on_chat_message(self, msg):
content_type, chat_type, chat_id = telepot.glance(msg)
if content_type == 'video':
self._video = msg['video']['file_id']
self.sender.sendMessage("Great, now send the caption for this video!")
elif content_type == 'text':
if self._video != 0:
self.sender.sendVideo(self._video, caption=msg['text'])
else:
if msg['text'].startswith('/'):
if msg['text'] == '/start':
self.sender.sendMessage("Hey, you can use this bot to put captions on videos! Just send a video first and then the caption you want to use.")
else:
self.sender.sendMessage("I'm sorry, I don't know what to do with this command!")
else:
self.sender.sendMessage("First send a video and then send a caption to go with it!")
TOKEN = 'YOUR_TOKEN_HERE'
bot = telepot.DelegatorBot(TOKEN, [
(per_chat_id(), create_open(MessageCounter, timeout=10)),
])
bot.notifyOnMessage(run_forever=True)
```
|
{
"source": "jellevink/randomnumberproject",
"score": 3
}
|
#### File: random1/application/routes.py
```python
from flask import request, render_template
from application import app
import requests
import random
@app.route('/', methods=["GET"])
def makerandomnum1():
rand_num1=str(random.randint(1,10))
print(rand_num1)
return rand_num1
print(makerandomnum1())
```
#### File: random1/tests/test_random1.py
```python
import pytest
import unittest
from application import app, routes
def test_random_number1():
    assert routes.makerandomnum1() in {str(n) for n in range(1, 11)}
```
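Beyond the corrected assertion above (the original `or` chain always evaluated truthy), a slightly stronger check is to go through Flask's test client so the route is exercised via the WSGI layer; a sketch, assuming the `application` package above is importable:
```python
from application import app  # the Flask app created by the application package above

def test_random_number1_via_client():
    response = app.test_client().get('/')
    assert response.status_code == 200
    assert 1 <= int(response.data) <= 10
```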
|
{
"source": "JelleZijlstra/asynq",
"score": 2
}
|
#### File: asynq/asynq/async_task.py
```python
import inspect
import six
import sys
import qcore.helpers as core_helpers
import qcore.inspection as core_inspection
import qcore.errors as core_errors
from . import debug
from . import futures
from . import scheduler
from . import _debug
__traceback_hide__ = True
_debug_options = _debug.options
_futures_none = futures._none
_none_future = futures.none_future
MAX_DUMP_INDENT = 40
class AsyncTaskCancelledError(GeneratorExit):
pass
class AsyncTaskResult(GeneratorExit):
def __init__(self, result):
GeneratorExit.__init__(self, "AsyncTaskResult")
self.result = result
class AsyncTask(futures.FutureBase):
"""Asynchronous task implementation.
    Uses the passed generator (normally produced by a generator method)
    to implement asynchronous "await" behavior via the 'yield' keyword.
"""
def __init__(self, generator, fn, args, kwargs, group_cancel=True, max_depth=0):
super(AsyncTask, self).__init__()
if _debug_options.ENABLE_COMPLEX_ASSERTIONS:
assert core_inspection.is_cython_or_generator(generator), \
'generator is expected, but %s is provided' % generator
self.fn = fn
self.args = args
self.kwargs = kwargs
self.is_scheduled = False
self.iteration_index = 0
self.group_cancel = group_cancel
self.caller = None
self.depth = 0
self.max_depth = max_depth
self.scheduler = None
self.creator = scheduler.get_active_task()
self._generator = generator
self._frame_info = None
self._last_value = None
self._dependencies = set()
if self.creator is None:
self._contexts = []
else:
self._contexts = list(self.creator._contexts)
if _debug_options.DUMP_NEW_TASKS:
debug.write('@async: new task: %s, created by %s' %
(debug.str(self), debug.str(self.creator)))
def can_continue(self):
"""Indicates whether this async task has more steps to execute.
        A task can't continue if its underlying generator is disposed.
"""
return self._generator is not None
def is_blocked(self):
"""Indicates whether this async task is currently blocked.
Blocked tasks are tasks waiting for completion of other
tasks or batches.
"""
return True if self._dependencies else False
def start(self, run_immediately=False):
"""
Starts the task on the current task scheduler.
Newly created tasks aren't started by default.
:param run_immediately: indicates whether first iteration
over the generator must be performed immediately
:return: self
"""
s = scheduler.get_scheduler()
return s.start(self, run_immediately)
def after(self, future):
"""Starts the task after specified future gets computed.
:param future: future that must be computed
:return: self
"""
s = scheduler.get_scheduler()
return s.continue_with(future, self)
def cancel_dependencies(self, error):
if not self._dependencies:
return
dependencies = list(self._dependencies)
cancellation_error = error if isinstance(error, AsyncTaskCancelledError) else AsyncTaskCancelledError(error)
if _debug_options.DUMP_DEPENDENCIES:
debug.write('@async: -> cancelling dependencies of %s:' % debug.str(self))
for dependency in dependencies:
_cancel_futures(dependency, cancellation_error)
if _debug_options.DUMP_DEPENDENCIES:
debug.write('@async: <- cancelled dependencies of %s' % debug.str(self))
def _compute(self):
# Forwards the call to task scheduler
scheduler.get_scheduler().await([self])
# No need to assign a value/error here, since
# _continue method (called by TaskScheduler) does this.
def _computed(self):
try:
if self._generator is not None:
self._generator.close()
self._generator = None
finally:
# super() doesn't work in Cython-ed version here
futures.FutureBase._computed(self)
error = self.error()
if error is not None and self.group_cancel:
self.cancel_dependencies(error)
def _continue(self):
self._before_continue()
# Obvious optimization: we don't need to return from
        # this method if we can still go further after a yield.
        # So we return only if there are dependencies or the
        # current task is computed.
while True:
try:
value = unwrap(self._last_value)
error = None
except BaseException as e:
value = None
error = e
try:
self._accept_yield_result(self._continue_on_generator(value, error))
except StopIteration as error: # Most frequent, so it's the first one
if hasattr(error, 'value'):
# We're on a Python version that supports adding a value to StopIteration
self._queue_exit(error.value)
else:
# This means there was no asynq.result() call, so the value of
# this task should be None
self._queue_exit(None)
except GeneratorExit as error:
error_type = type(error)
if error_type is AsyncTaskResult:
self._queue_exit(error.result)
elif error_type is AsyncTaskCancelledError:
self._accept_error(error)
else:
self._queue_exit(None)
except BaseException as error:
self._accept_error(error)
finally:
if self._dependencies or self.is_computed():
self._after_continue()
return
def _continue_on_generator(self, value, error):
try:
if error is None:
if self._generator is None:
raise StopIteration()
elif self._generator is None:
raise error
self.iteration_index += 1
if error is None:
return self._generator.send(value)
else:
self._last_value = None # No need to keep it further
self._frame_info = debug.get_frame_info(self._generator)
if hasattr(error, '_task'):
return self._generator.throw(error._type_, error, error._traceback)
else:
return self._generator.throw(type(error), error)
except (StopIteration, GeneratorExit):
# Returning leads to a StopIteration exception, which is
# handled here. In this case we shouldn't need to extract frame
# info, because that is slow.
self._generator = None
raise
except:
# Any exception means generator is closed at this point,
            # since it falls through all exception handlers there;
# "return" leads to StopIteration exception, so it's
# handled here as well.
# If the task failed, we want to save the frame info here so that the traceback can
# show where in the async task the failure happened. However, if the error was thrown
# into the generator, we'll already have set the frame info.
if self._frame_info is None:
tb = sys.exc_info()[2]
while tb.tb_next is not None:
tb = tb.tb_next
self._frame_info = inspect.getframeinfo(tb.tb_frame)
self._generator = None
raise
def _accept_yield_result(self, result):
if _debug_options.DUMP_YIELD_RESULTS:
debug.write('@async: yield: %s -> %s' % (debug.str(self), debug.repr(result)))
self._last_value = result
self.scheduler.add_dependencies(self, result)
def _accept_error(self, error):
if self._value is not _futures_none: # is_computed(), but faster
# We can't change the value in this case, so we can just return here.
pass
else:
# when there is no _task it means that this is the bottommost level of the async
# task. We must attach the traceback as soon as possible
if not hasattr(error, '_task'):
error._task = self
core_errors.prepare_for_reraise(error)
else:
# when we already have the _task on the error, it means that
# some child generator of ours had an error.
# now, we are storing the _traceback on the error. we use this so we can
# raise it with that exact traceback later.
                # now, when we do raise it, the upper level gets a new traceback
                # with the current level's traceback connected via a linked-list pointer,
                # known as tb_next in the traceback object.
# this is really important. if we keep updating this traceback,
# we can glue all the different tasks' tracebacks and make it look like
# the error came from there.
error._traceback = sys.exc_info()[2]
if _debug_options.DUMP_EXCEPTIONS:
debug.dump_error(error)
# Must queue, since there can be captured dependencies to resolve
self._queue_throw_error(error)
def _queue_exit(self, result):
# Result must be unwrapped first,
# and since there can be dependencies, the error result (exception)
# is still possible at this point.
#
# So we can't instantly close the generator here, since if there is
# an error during dependency computation, we must re-throw it
# from the generator.
if self._value is not _futures_none: # is_computed(), but faster
raise futures.FutureIsAlreadyComputed(self)
if self._generator is not None:
self._generator.close()
self._generator = None
if self._dependencies: # is_blocked(), but faster
if _debug_options.DUMP_QUEUED_RESULTS:
debug.write('@async: queuing exit: %s <- %s' % (debug.repr(result), debug.str(self)))
self._last_value = result
else:
self.set_value(result)
def _queue_throw_error(self, error):
if self._value is not _futures_none: # is_computed(), but faster
raise futures.FutureIsAlreadyComputed(self)
if self._generator is not None or self._dependencies: # can_continue() or is_blocked(), but faster
if _debug_options.DUMP_QUEUED_RESULTS:
debug.write('@async: queuing throw error: %s <-x- %s' % (debug.repr(error), debug.str(self)))
self._last_value = futures.ErrorFuture(error) # To get it re-thrown on unwrap
if self.group_cancel:
self.cancel_dependencies(error)
else:
self.set_error(error)
def _remove_dependency(self, dependency):
self._remove_dependency_cython(dependency)
def _remove_dependency_cython(self, dependency):
if _debug_options.DUMP_DEPENDENCIES:
debug.write('@async: -dependency: %s got %s' % (debug.str(self), debug.str(dependency)))
self._dependencies.remove(dependency)
if self._value is _futures_none: # not is_computed(), but faster
try:
error = dependency.error()
if not (error is None or isinstance(error, AsyncTaskCancelledError)):
self._queue_throw_error(error)
finally:
if not self._dependencies: # not is_blocked(), but faster
self.scheduler._schedule_without_checks(self)
def make_dependency(self, task, scheduler):
"""Mark self as a dependency on task.
i.e. self needs to be computed before task can be continued further.
"""
self.depth = task.depth + 1
if self.max_depth == 0:
self.max_depth = task.max_depth
if self.max_depth != 0:
if self.depth > self.max_depth:
debug.dump(scheduler)
assert False, \
"Task stack depth exceeded specified maximum (%i)" % self.max_depth
self.caller = task
task._dependencies.add(self)
self.on_computed.subscribe(task._remove_dependency)
scheduler.schedule(self)
def _before_continue(self):
self._resume_contexts()
def _after_continue(self):
self._pause_contexts()
def traceback(self):
try:
self_str = self._traceback_line()
except Exception:
# If _traceback_line failed for whatever reason (e.g. there is no correct frame_info),
# fall back to __str__ so that we can still provide useful information for debugging
self_str = core_helpers.safe_str(self)
if self.caller is None:
return [self_str]
result = self.caller.traceback()
result.append(self_str)
return result
def _traceback_line(self):
frame_info = self._frame_info
if frame_info is None and self._generator is not None:
frame_info = debug.get_frame_info(self._generator)
if frame_info is not None:
template = '''File "%(file)s", line %(lineno)s, in %(funcname)s
%(codeline)s'''
return template % {
'file': frame_info.filename,
'lineno': frame_info.lineno,
'funcname': frame_info.function,
'codeline': '\n'.join(frame_info.code_context).strip()
}
else:
return str(self)
def __str__(self):
fn_str = core_inspection.get_function_call_str(self.fn, self.args, self.kwargs)
name = '@async %s' % fn_str
# we subtract one because by the time the stacktrace is printed
# the iteration_index has already been incremented
if self.iteration_index - 1 == 0:
step = 'before 1st yield'
else:
step = 'passed yield #%i' % (self.iteration_index - 1)
if self.is_computed():
status = 'computed, '
if self.error() is None:
status += '= ' + repr(self.value())
else:
status += 'error = ' + repr(self.error())
else:
if self.is_blocked():
status = 'blocked x%i' % len(self._dependencies)
elif self.can_continue():
if self.is_scheduled:
status = 'scheduled'
elif self.scheduler:
status = 'waiting'
else:
status = 'new'
else:
status = 'almost finished (generator is closed)'
if self.is_scheduled:
status += ', scheduled'
return '%s (%s, %s)' % (name, status, step)
def dump(self, indent=0):
if indent > MAX_DUMP_INDENT:
debug.write('...', indent + 1)
return
debug.write(debug.str(self), indent)
if self._dependencies:
debug.write('Dependencies:', indent + 1)
for dependency in self._dependencies:
dependency.dump(indent + 2) # Recursive
# debug.write(debug.str(dependency), indent + 2)
else:
debug.write('No dependencies.', indent + 1)
# Contexts support
def _enter_context(self, context):
if _debug_options.DUMP_CONTEXTS:
debug.write('@async: +context: %s' % debug.str(context))
self._contexts.append(context)
def _leave_context(self, context):
if _debug_options.DUMP_CONTEXTS:
debug.write('@async: -context: %s' % debug.str(context))
self._contexts.remove(context)
def _pause_contexts(self):
contexts = self._contexts
i = len(contexts) - 1
# execute each __pause__() in a try/except and if 1 or more of them
# raise an exception, then save the last exception raised so that it
# can be re-raised later. We re-raise the last exception to make the
# behavior consistent with __exit__.
error = None
while i >= 0:
try:
contexts[i].__pause__()
except BaseException as e:
error = e
core_errors.prepare_for_reraise(error)
i -= 1
if error is not None:
self._accept_error(error)
def _resume_contexts(self):
i = 0
contexts = self._contexts
l = len(contexts)
# same try/except deal as with _pause_contexts, but in this case
# we re-raise the first exception raised.
error = None
while i < l:
try:
contexts[i].__resume__()
except BaseException as e:
if error is None:
error = e
core_errors.prepare_for_reraise(error)
i += 1
if error is not None:
self._accept_error(error)
def unwrap(value):
"""
    'Unwraps' the provided value by converting:
    * ``FutureBase`` objects (and hence ``AsyncTask`` objects)
      to their values
    * Tuples, lists and dicts containing ``FutureBase`` or
      ``AsyncTask`` objects to containers of their values
    * Containers are processed recursively.
"""
if value is None: # Special case
return None
elif isinstance(value, futures.FutureBase):
future = value
return future.value()
elif type(value) is tuple:
tpl = value
# Simple perf. optimization
length = len(tpl)
if length <= 1:
if length == 0:
return ()
return (unwrap(tpl[0]),)
elif length == 2:
return (unwrap(tpl[0]), unwrap(tpl[1]))
else:
result = []
for item in tpl:
result.append(unwrap(item))
return tuple(result)
elif type(value) is list:
lst = value
return [unwrap(item) for item in lst]
elif type(value) is dict:
dct = value
return {key: unwrap(value) for key, value in six.iteritems(dct)}
else:
raise TypeError(
"Cannot unwrap an object of type '%s': only futures and None are allowed." %
type(value)
)
# Private part
_empty_tuple = tuple()
_empty_dictionary = dict()
globals()['_empty_tuple'] = _empty_tuple
globals()['_empty_dictionary'] = _empty_dictionary
def _cancel_futures(value, error):
"""Used by ``AsyncTask._continue`` to cancel evaluation of tasks
and futures due to failure of one of them.
"""
if value is None:
return
if isinstance(value, AsyncTask):
if not value.is_computed():
value._queue_throw_error(error)
elif isinstance(value, futures.FutureBase):
if not value.is_computed():
value.set_error(error)
```
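A hedged usage sketch of `unwrap()` above: assuming asynq exposes `ConstFuture` (a future whose value is already computed) and that the module is importable as `asynq.async_task`, nested containers of futures collapse into plain values. This is an illustration only, not asynq documentation:
```python
from asynq import ConstFuture            # assumed export: a pre-computed future
from asynq.async_task import unwrap      # module path assumed from the file above

print(unwrap(ConstFuture(1)))                            # 1
print(unwrap((ConstFuture(1), (ConstFuture(2), None))))  # (1, (2, None))
print(unwrap({'key': ConstFuture(3)}))                   # {'key': 3}
print(unwrap(None))                                      # None
```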
#### File: asynq/tests/test_performance.py
```python
import gc
from asynq import async, debug, result, AsyncTask
from .helpers import Profiler
values = {}
class WrappedAsyncTask(AsyncTask):
pass
def wrapped_async(*args, **kwargs):
return async(*args, cls=WrappedAsyncTask, **kwargs)
# async = decorators.async_old
# async = wrapped_async
@async(pure=True)
def get(key):
global values
result(values.get(key)); return
yield # Must be a generator
@async(pure=True)
def set(key, value):
global values
values[key] = value
return
yield # Must be a generator
@async(pure=True)
def get_and_set(key_from, key_to):
value = yield get(key_from)
yield set(key_to, value)
@async(pure=True)
def performance_test(task_count):
global values
values = {}
assert len(values) == 0 # Nothing is executed yet!
yield set(0, 0)
assert len(values) == 1
yield [get_and_set(i, i + 1) for i in range(0, task_count)]
assert len(values) == task_count + 1 # Done at this point
def test():
with Profiler('test_performance(100): warming up'):
performance_test(100).value()
gc.collect()
with Profiler('test_performance(3000): actual test (w/assertions)'):
performance_test(3000).value()
gc.collect()
with debug.disable_complex_assertions(), \
Profiler('test_performance(3000): actual test (w/o assertions)'):
performance_test(3000).value()
```
|
{
"source": "JelleZijlstra/pytype",
"score": 3
}
|
#### File: pytype/pytype/abstract_test.py
```python
import unittest
from pytype import abstract
from pytype import config
from pytype import errors
from pytype import exceptions
from pytype import function
from pytype import vm
from pytype.pytd import cfg
from pytype.pytd import pytd
class FakeFrame(object):
def __init__(self):
self.current_opcode = None
class AbstractTestBase(unittest.TestCase):
def setUp(self):
self._vm = vm.VirtualMachine(errors.ErrorLog(), config.Options([""]))
self._program = cfg.Program()
self._node = self._vm.root_cfg_node.ConnectNew("test_node")
def new_var(self, *values):
"""Create a Variable bound to the given values."""
var = self._program.NewVariable()
for value in values:
var.AddBinding(value, source_set=(), where=self._node)
return var
def new_dict(self, **kwargs):
"""Create a Dict from keywords mapping names to Variable objects."""
d = abstract.Dict(self._vm, self._node)
for name, var in kwargs.items():
d.set_str_item(self._node, name, var)
return d
class InstanceTest(AbstractTestBase):
# TODO(dbaum): Is it worth adding a test for frozenset()? There isn't
# an easy way to create one directly from the vm, it is already covered
# in test_splits.py, and there aren't any new code paths. Perhaps it isn't
# worth the effort.
def test_compatible_with_non_container(self):
# Compatible with either True or False.
i = abstract.Instance(
self._vm.convert.object_type, self._vm, self._node)
self.assertIs(True, i.compatible_with(True))
self.assertIs(True, i.compatible_with(False))
def test_compatible_with_list(self):
i = abstract.Instance(
self._vm.convert.list_type, self._vm, self._node)
i.init_type_parameters(abstract.T)
# Empty list is not compatible with True.
self.assertIs(False, i.compatible_with(True))
self.assertIs(True, i.compatible_with(False))
# Once a type parameter is set, list is compatible with True and False.
i.merge_type_parameter(self._node, abstract.T, self._vm.convert.object_type)
self.assertIs(True, i.compatible_with(True))
self.assertIs(True, i.compatible_with(False))
def test_compatible_with_set(self):
i = abstract.Instance(
self._vm.convert.set_type, self._vm, self._node)
i.init_type_parameters(abstract.T)
        # Empty set is not compatible with True.
self.assertIs(False, i.compatible_with(True))
self.assertIs(True, i.compatible_with(False))
        # Once a type parameter is set, the set is compatible with True and False.
i.merge_type_parameter(self._node, abstract.T, self._vm.convert.object_type)
self.assertIs(True, i.compatible_with(True))
self.assertIs(True, i.compatible_with(False))
def test_compatible_with_none(self):
# This test is specifically for abstract.Instance, so we don't use
# self._vm.convert.none, which is an AbstractOrConcreteValue.
i = abstract.Instance(
self._vm.convert.none_type, self._vm, self._node)
self.assertIs(False, i.compatible_with(True))
self.assertIs(True, i.compatible_with(False))
class TupleTest(AbstractTestBase):
def setUp(self):
super(TupleTest, self).setUp()
self._var = self._program.NewVariable()
self._var.AddBinding(abstract.Unknown(self._vm), [], self._node)
def test_compatible_with__not_empty(self):
t = abstract.Tuple((self._var,), self._vm, self._node)
self.assertIs(True, t.compatible_with(True))
self.assertIs(False, t.compatible_with(False))
def test_compatible_with__empty(self):
t = abstract.Tuple((), self._vm, self._node)
self.assertIs(False, t.compatible_with(True))
self.assertIs(True, t.compatible_with(False))
def test_getitem__concrete_index(self):
t = abstract.Tuple((self._var,), self._vm, self._node)
index = self._vm.convert.constant_to_var("index", 0)
node, var = t.getitem_slot(self._node, index)
self.assertIs(node, self._node)
self.assertIs(var, self._var)
def test_getitem__abstract_index(self):
t = abstract.Tuple((self._var,), self._vm, self._node)
index = self._vm.convert.build_int(self._node)
node, var = t.getitem_slot(self._node, index)
self.assertIs(node, self._node)
self.assertIs(abstract.get_atomic_value(var),
abstract.get_atomic_value(self._var))
class DictTest(AbstractTestBase):
def setUp(self):
super(DictTest, self).setUp()
self._d = abstract.Dict(self._vm, self._node)
self._var = self._program.NewVariable()
self._var.AddBinding(abstract.Unknown(self._vm), [], self._node)
def test_compatible_with__when_empty(self):
self.assertIs(False, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
def test_compatible_with__after_setitem(self):
# Once a slot is added, dict is ambiguous.
self._d.setitem_slot(self._node, self._var, self._var)
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
def test_compatible_with__after_set_str_item(self):
self._d.set_str_item(self._node, "key", self._var)
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(False, self._d.compatible_with(False))
@unittest.skip("update() does not update the parameters")
def test_compatible_with__after_update(self):
# Updating an empty dict also makes it ambiguous.
self._d.update(self._node, abstract.Unknown(self._vm))
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
class IsInstanceTest(AbstractTestBase):
def setUp(self):
super(IsInstanceTest, self).setUp()
self._is_instance = abstract.IsInstance(self._vm)
# Easier access to some primitive instances.
self._bool = self._vm.convert.primitive_class_instances[bool]
self._int = self._vm.convert.primitive_class_instances[int]
self._str = self._vm.convert.primitive_class_instances[str]
# Values that represent primitive classes.
self._obj_class = abstract.get_atomic_value(
self._vm.convert.primitive_classes[object])
self._int_class = abstract.get_atomic_value(
self._vm.convert.primitive_classes[int])
self._str_class = abstract.get_atomic_value(
self._vm.convert.primitive_classes[str])
def assert_call(self, expected, left, right):
"""Check that call() returned the desired results.
Args:
expected: A dict from values to source sets, where a source set is
represented by the sorted binding names separated by spaces, for
example "left:0 right:1" would indicate binding #0 of variable
"left" and binding #1 of variable "right".
left: A Variable to use as the first arg to call().
right: A Variable to use as the second arg to call().
"""
name_map = {left: "left", right: "right"}
node, result = self._is_instance.call(
self._node, None, abstract.FunctionArgs((left, right), self.new_dict(),
None, None))
self.assertEquals(self._node, node)
result_map = {}
# Turning source sets into canonical string representations of the binding
# names makes it much easier to debug failures.
for b in result.bindings:
terms = set()
for o in b.origins:
self.assertEquals(self._node, o.where)
for sources in o.source_sets:
terms.add(" ".join(sorted(
"%s:%d" % (name_map[b.variable], b.variable.bindings.index(b))
for b in sources)))
result_map[b.data] = terms
self.assertEquals(expected, result_map)
def test_call_single_bindings(self):
right = self.new_var(self._str_class)
left = self.new_var(self._str)
self.assert_call(
{self._vm.convert.true: {"left:0 right:0"}},
left, right)
left = self.new_var(self._int)
self.assert_call(
{self._vm.convert.false: {"left:0 right:0"}},
left, right)
left = self.new_var(abstract.Unknown(self._vm))
self.assert_call(
{self._bool: {"left:0 right:0"}},
left, right)
def test_call_multiple_bindings(self):
left = self.new_var(self._int, self._str)
right = self.new_var(self._int_class, self._str_class)
self.assert_call(
{
self._vm.convert.true: {"left:0 right:0", "left:1 right:1"},
self._vm.convert.false: {"left:0 right:1", "left:1 right:0"},
}, left, right)
def test_call_wrong_argcount(self):
self._vm.push_frame(FakeFrame())
node, result = self._is_instance.call(
self._node, None, abstract.FunctionArgs((), self.new_dict(),
None, None))
self.assertEquals(self._node, node)
self.assertIsInstance(abstract.get_atomic_value(result),
abstract.Unsolvable)
self.assertRegexpMatches(
str(self._vm.errorlog),
r"isinstance.*expects 2.*got 0.*\[wrong-arg-count\]")
def test_call_wrong_keywords(self):
self._vm.push_frame(FakeFrame())
x = self.new_var(abstract.Unknown(self._vm))
node, result = self._is_instance.call(
self._node, None, abstract.FunctionArgs((x, x), self.new_dict(foo=x),
None, None))
self.assertEquals(self._node, node)
self.assertIsInstance(abstract.get_atomic_value(result),
abstract.Unsolvable)
self.assertRegexpMatches(
str(self._vm.errorlog),
r"foo.*isinstance.*\[wrong-keyword-args\]")
def test_is_instance(self):
def check(expected, left, right):
self.assertEquals(expected, self._is_instance._is_instance(left, right))
obj_class = self._vm.convert.primitive_classes[object].bindings[0].data
# Unknown and Unsolvable are ambiguous.
check(None, abstract.Unknown(self._vm), obj_class)
check(None, abstract.Unsolvable(self._vm), obj_class)
# If the object's class has multiple bindings, result is ambiguous.
obj = abstract.SimpleAbstractValue("foo", self._vm)
check(None, obj, obj_class)
obj.set_class(self._node, self.new_var(
self._str_class, self._int_class))
check(None, obj, self._str_class)
# If the class_spec is not a class, result is ambiguous.
check(None, self._str, self._str)
# Result is True/False depending on if the class is in the object's mro.
check(True, self._str, obj_class)
check(True, self._str, self._str_class)
check(False, self._str, self._int_class)
def test_flatten(self):
def maybe_var(v):
return v if isinstance(v, cfg.Variable) else self.new_var(v)
def new_tuple(*args):
pyval = tuple(maybe_var(a) for a in args)
return self._vm.convert.tuple_to_value(self._node, pyval)
def check(expected_ambiguous, expected_classes, value):
classes = []
ambiguous = self._is_instance._flatten(value, classes)
self.assertEquals(expected_ambiguous, ambiguous)
self.assertEquals(expected_classes, classes)
unknown = abstract.Unknown(self._vm)
# Simple values.
check(False, [self._str_class], self._str_class)
check(True, [], self._str)
check(True, [], unknown)
# (str, int)
check(False, [self._str_class, self._int_class],
new_tuple(self._str_class, self._int_class))
# (str, ?, int)
check(True, [self._str_class, self._int_class],
new_tuple(self._str_class, unknown, self._int_class))
# (str, (int, object))
check(False, [self._str_class, self._int_class, self._obj_class],
new_tuple(
self._str_class,
new_tuple(self._int_class, self._obj_class)))
# (str, (?, object))
check(True, [self._str_class, self._obj_class],
new_tuple(
self._str_class,
new_tuple(unknown, self._obj_class)))
# A variable with multiple bindings is ambiguous.
# (str, int | object)
check(True, [self._str_class],
new_tuple(self._str_class,
self.new_var(self._int_class, self._obj_class)))
class PyTDTest(AbstractTestBase):
"""Tests for abstract -> pytd type conversions."""
def testMetaclass(self):
cls = abstract.InterpreterClass("X", [], {}, None, self._vm)
meta = abstract.InterpreterClass("M", [], {}, None, self._vm)
meta.official_name = "M"
cls.cls = meta.to_variable(self._vm.root_cfg_node)
pytd_cls = cls.to_pytd_def(self._vm.root_cfg_node, "X")
self.assertEquals(pytd_cls.metaclass, pytd.NamedType("M"))
def testInheritedMetaclass(self):
parent = abstract.InterpreterClass("X", [], {}, None, self._vm)
meta = abstract.InterpreterClass("M", [], {}, None, self._vm)
meta.official_name = "M"
parent.cls = meta.to_variable(self._vm.root_cfg_node)
child = abstract.InterpreterClass(
"Y", [parent.to_variable(self._vm.root_cfg_node)], {}, None, self._vm)
self.assertIs(child.cls, parent.cls)
pytd_cls = child.to_pytd_def(self._vm.root_cfg_node, "Y")
self.assertIs(pytd_cls.metaclass, None)
def testMetaclassUnion(self):
cls = abstract.InterpreterClass("X", [], {}, None, self._vm)
meta1 = abstract.InterpreterClass("M1", [], {}, None, self._vm)
meta2 = abstract.InterpreterClass("M2", [], {}, None, self._vm)
meta1.official_name = "M1"
meta2.official_name = "M2"
cls.cls = abstract.Union(
[meta1, meta2], self._vm).to_variable(self._vm.root_cfg_node)
pytd_cls = cls.to_pytd_def(self._vm.root_cfg_node, "X")
self.assertEquals(pytd_cls.metaclass, pytd.UnionType(
(pytd.NamedType("M1"), pytd.NamedType("M2"))))
def testToTypeWithView1(self):
# to_type(<instance of List[int or unsolvable]>, view={T: int})
instance = abstract.Instance(
self._vm.convert.list_type, self._vm, self._vm.root_cfg_node)
instance.type_parameters["T"] = self._vm.program.NewVariable(
[self._vm.convert.unsolvable], [], self._vm.root_cfg_node)
param_binding = instance.type_parameters["T"].AddBinding(
self._vm.convert.primitive_class_instances[int], [],
self._vm.root_cfg_node)
view = {instance.cls: instance.cls.bindings[0],
instance.type_parameters["T"]: param_binding,
param_binding.data.cls: param_binding.data.cls.bindings[0]}
pytd_type = instance.to_type(self._vm.root_cfg_node, seen=None, view=view)
self.assertEquals("__builtin__.list", pytd_type.base_type.name)
self.assertSetEqual({"__builtin__.int"},
{t.name for t in pytd_type.parameters})
def testToTypeWithView2(self):
# to_type(<instance of <str or unsolvable>>, view={__class__: str})
cls = self._vm.program.NewVariable(
[self._vm.convert.unsolvable], [], self._vm.root_cfg_node)
cls_binding = cls.AddBinding(
self._vm.convert.str_type.data[0], [], self._vm.root_cfg_node)
instance = abstract.Instance(cls, self._vm, self._vm.root_cfg_node)
view = {cls: cls_binding}
pytd_type = instance.to_type(self._vm.root_cfg_node, seen=None, view=view)
self.assertEquals("__builtin__.str", pytd_type.name)
def testToTypeWithView3(self):
# to_type(<tuple (int or str,)>, view={0: str})
param1 = self._vm.convert.primitive_class_instances[int]
param2 = self._vm.convert.primitive_class_instances[str]
param_var = param1.to_variable(self._vm.root_cfg_node)
str_binding = param_var.AddBinding(param2, [], self._vm.root_cfg_node)
instance = abstract.Tuple((param_var,), self._vm, self._vm.root_cfg_node)
view = {param_var: str_binding, instance.cls: instance.cls.bindings[0],
str_binding.data.cls: str_binding.data.cls.bindings[0]}
pytd_type = instance.to_type(self._vm.root_cfg_node, seen=None, view=view)
self.assertEquals(pytd_type.parameters[0],
pytd.NamedType("__builtin__.str"))
def testToTypeWithViewAndEmptyParam(self):
instance = abstract.Instance(
self._vm.convert.list_type, self._vm, self._vm.root_cfg_node)
instance.type_parameters["T"] = self._vm.program.NewVariable()
view = {instance.cls: instance.cls.bindings[0]}
pytd_type = instance.to_type(self._vm.root_cfg_node, seen=None, view=view)
self.assertEquals("__builtin__.list", pytd_type.base_type.name)
self.assertSequenceEqual((pytd.NothingType(),), pytd_type.parameters)
def testTypingContainer(self):
cls = self._vm.convert.list_type.bindings[0].data
container = abstract.AnnotationContainer("List", self._vm, cls)
expected = pytd.HomogeneousContainerType(pytd.NamedType("__builtin__.list"),
(pytd.AnythingType(),))
actual = container.get_instance_type(self._vm.root_cfg_node)
self.assertEquals(expected, actual)
# TODO(rechen): Test InterpreterFunction.
class FunctionTest(AbstractTestBase):
def _make_pytd_function(self, params):
pytd_params = []
for i, p in enumerate(params):
p_type = pytd.ClassType(p.name)
p_type.cls = p
pytd_params.append(
pytd.Parameter("_" + str(i), p_type, False, False, None))
pytd_sig = pytd.Signature(
tuple(pytd_params), None, None, pytd.AnythingType(), (), ())
sig = abstract.PyTDSignature("f", pytd_sig, self._vm)
return abstract.PyTDFunction("f", (sig,), pytd.METHOD, self._vm)
def _call_pytd_function(self, f, args):
b = f.to_variable(self._vm.root_cfg_node).bindings[0]
return f.call(
self._vm.root_cfg_node, b, abstract.FunctionArgs(posargs=args))
def test_call_with_empty_arg(self):
self.assertRaises(exceptions.ByteCodeTypeError, self._call_pytd_function,
self._make_pytd_function(params=()),
(self._vm.program.NewVariable(),))
def test_call_with_bad_arg(self):
f = self._make_pytd_function(
(self._vm.lookup_builtin("__builtin__.str"),))
arg = self._vm.convert.primitive_class_instances[int].to_variable(
self._vm.root_cfg_node)
self.assertRaises(
abstract.WrongArgTypes, self._call_pytd_function, f, (arg,))
def test_simple_call(self):
f = self._make_pytd_function(
(self._vm.lookup_builtin("__builtin__.str"),))
arg = self._vm.convert.primitive_class_instances[str].to_variable(
self._vm.root_cfg_node)
node, ret = self._call_pytd_function(f, (arg,))
self.assertIs(node, self._vm.root_cfg_node)
retval, = ret.bindings
self.assertIs(retval.data, self._vm.convert.unsolvable)
def test_call_with_multiple_arg_bindings(self):
f = self._make_pytd_function(
(self._vm.lookup_builtin("__builtin__.str"),))
arg = self._vm.program.NewVariable()
arg.AddBinding(self._vm.convert.primitive_class_instances[str], [],
self._vm.root_cfg_node)
arg.AddBinding(self._vm.convert.primitive_class_instances[int], [],
self._vm.root_cfg_node)
node, ret = self._call_pytd_function(f, (arg,))
self.assertIs(node, self._vm.root_cfg_node)
retval, = ret.bindings
self.assertIs(retval.data, self._vm.convert.unsolvable)
def test_call_with_skipped_combination(self):
f = self._make_pytd_function(
(self._vm.lookup_builtin("__builtin__.str"),))
node = self._vm.root_cfg_node.ConnectNew()
arg = self._vm.convert.primitive_class_instances[str].to_variable(node)
node, ret = self._call_pytd_function(f, (arg,))
self.assertIs(node, self._vm.root_cfg_node)
self.assertFalse(ret.bindings)
def test_signature_from_pytd(self):
# def f(self: Any, *args: Any)
self_param = pytd.Parameter("self", pytd.AnythingType(), False, False, None)
args_param = pytd.Parameter("args", pytd.AnythingType(), False, True, None)
sig = function.Signature.from_pytd(
self._vm, "f", pytd.Signature(
(self_param,), args_param, None, pytd.AnythingType(), (), ()))
self.assertEquals(sig.name, "f")
self.assertSequenceEqual(sig.param_names, ("self",))
self.assertEquals(sig.varargs_name, "args")
self.assertFalse(sig.kwonly_params)
self.assertIs(sig.kwargs_name, None)
self.assertSetEqual(set(sig.annotations), {"self", "args"})
self.assertFalse(sig.late_annotations)
self.assertFalse(sig.has_return_annotation)
self.assertTrue(sig.has_param_annotations)
def test_signature_annotations(self):
# def f(self: Any, *args: Any)
self_param = pytd.Parameter("self", pytd.AnythingType(), False, False, None)
args_param = pytd.Parameter("args", pytd.AnythingType(), False, True, None)
sig = function.Signature.from_pytd(
self._vm, "f", pytd.Signature(
(self_param,), args_param, None, pytd.AnythingType(), (), ()))
self.assertIs(sig.annotations["self"], self._vm.convert.unsolvable)
args_type = sig.annotations["args"] # Should be Tuple[Any]
self.assertIsInstance(args_type, abstract.ParameterizedClass)
self.assertIs(args_type.base_cls,
abstract.get_atomic_value(self._vm.convert.tuple_type))
self.assertDictEqual(args_type.type_parameters,
{abstract.T: self._vm.convert.unsolvable})
self.assertIs(sig.drop_first_parameter().annotations["args"], args_type)
def test_signature_annotations_existence(self):
# def f(v: "X") -> "Y"
sig = function.Signature(
name="f",
param_names=("v",),
varargs_name=None,
kwonly_params=(),
kwargs_name=None,
defaults={},
annotations={},
late_annotations={
"v": function.LateAnnotation("X", "v", None),
"return": function.LateAnnotation("Y", "return", None)
}
)
self.assertFalse(sig.has_param_annotations)
self.assertFalse(sig.has_return_annotation)
sig.set_annotation("v", self._vm.convert.unsolvable)
self.assertTrue(sig.has_param_annotations)
self.assertFalse(sig.has_return_annotation)
sig.set_annotation("return", self._vm.convert.unsolvable)
self.assertTrue(sig.has_param_annotations)
self.assertTrue(sig.has_return_annotation)
if __name__ == "__main__":
unittest.main()
```
#### File: pytype/pytype/convert_structural_test.py
```python
import textwrap
import unittest
from pytype import convert_structural
from pytype.pyi import parser
from pytype.pytd import pytd
from pytype.pytd.parse import builtins
from pytype.pytd.parse import visitors
from pytype.tests import test_inference
class MatchTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.builtins_pytd = builtins.GetBuiltinsPyTD()
def parse(self, src):
ast = parser.parse_string(textwrap.dedent(src))
ast = ast.Visit(visitors.LookupBuiltins(builtins.GetBuiltinsAndTyping()[0]))
return ast
def parse_and_solve(self, src):
ast = self.parse(src)
ast = ast.Visit(visitors.NamedTypeToClassType())
ast = ast.Visit(visitors.AdjustTypeParameters())
types, _ = convert_structural.solve(ast, builtins_pytd=self.builtins_pytd)
# Drop "__builtin__" prefix, for more readable tests.
return {k: {v.rpartition("__builtin__.")[2] for v in l}
for k, l in types.items()}
def test_simple(self):
mapping = self.parse_and_solve("""
class `~unknown2`(object):
pass
class `~unknown1`(object):
def __add__(self, _1: `~unknown2`) -> int
""")
self.assertItemsEqual(["int", "bool"], mapping["~unknown1"])
self.assertItemsEqual(["int", "bool"], mapping["~unknown2"])
def test_float_and_bytearray(self):
mapping = self.parse_and_solve("""
class `~unknown1`(object):
def __add__(self, _1: int) -> float
def __add__(self, _1: float) -> float
class `~unknown2`(object):
def __add__(self, _1: str) -> bytearray
def __add__(self, _1: bytearray) -> bytearray
""")
self.assertItemsEqual(["float"], mapping["~unknown1"])
self.assertItemsEqual(["bytearray"], mapping["~unknown2"])
def test_float_and_bytearray2(self):
mapping = self.parse_and_solve("""
class `~unknown1`(object):
def __add__(self, _1: int or float) -> float
class `~unknown2`(object):
def __add__(self, _1: bytearray) -> bytearray
""")
self.assertItemsEqual(["float"], mapping["~unknown1"])
self.assertItemsEqual(["bytearray"], mapping["~unknown2"])
def test_append(self):
mapping = self.parse_and_solve("""
class `~unknown1`(object):
def append(self, _1: int) -> NoneType
""")
self.assertItemsEqual(["list", "bytearray",
"typing.List", "typing.MutableSequence"],
mapping["~unknown1"])
def test_single_list(self):
# Differs from test_append in that append(float) doesn't match bytearray
mapping = self.parse_and_solve("""
class `~unknown1`(object):
def append(self, _1: float) -> NoneType
""")
convert_structural.log_info_mapping(mapping)
self.assertItemsEqual(["list", "typing.MutableSequence", "typing.List"],
mapping["~unknown1"])
self.assertItemsEqual(["float"], mapping["~unknown1.__builtin__.list.T"])
def test_list(self):
mapping = self.parse_and_solve("""
class `~unknown2`(object):
def append(self, _1: `~unknown1`) -> NoneType
def __getitem__(self, _1: ?) -> `~unknown1`
class `~unknown1`(object):
def __add__(self: float, _1: int) -> float
def __add__(self: float, _1: float) -> float
""")
convert_structural.log_info_mapping(mapping)
self.assertItemsEqual(["float"], mapping["~unknown1"])
self.assertItemsEqual(["list", "typing.List", "typing.MutableSequence"],
mapping["~unknown2"])
self.assertItemsEqual(["float"], mapping["~unknown2.__builtin__.list.T"])
def test_float_list(self):
mapping = self.parse_and_solve("""
class `~unknown1`(object):
def append(self, _1: ?) -> NoneType
def __getitem__(self, _1: int) -> float
""")
convert_structural.log_info_mapping(mapping)
self.assertItemsEqual(["list", "typing.List", "typing.MutableSequence"],
mapping["~unknown1"])
self.assertItemsEqual(["float"], mapping["~unknown1.__builtin__.list.T"])
def test_two_lists(self):
mapping = self.parse_and_solve("""
class `~unknown1`(object):
def append(self: list, _1: NoneType) -> NoneType
class `~unknown2`(object):
def insert(self: list, _1: int, _2: float) -> NoneType
""")
self.assertItemsEqual(["list", "typing.List", "typing.MutableSequence"],
mapping["~unknown1"])
self.assertItemsEqual(["list", "typing.List", "typing.MutableSequence"],
mapping["~unknown2"])
self.assertItemsEqual(["NoneType"], mapping["~unknown1.__builtin__.list.T"])
self.assertItemsEqual(["float"], mapping["~unknown2.__builtin__.list.T"])
def test_float(self):
mapping = self.parse_and_solve("""
class `~unknown1`(object):
def __add__(self, _1: int) -> float
""")
self.assertItemsEqual(["float"], mapping["~unknown1"])
def test_or(self):
mapping = self.parse_and_solve("""
from typing import Iterator
class `~unknown1`(object):
def join(self, _1: unicode) -> unicode
def join(self, _1: Iterator[str]) -> str
""")
self.assertItemsEqual(["str"], mapping["~unknown1"])
def test_multiple(self):
mapping = self.parse_and_solve("""
class `~unknown1`(object):
def __add__(self, _1: int) -> float
def __add__(self, _1: float) -> float
class `~unknown2`(object):
def __add__(self, _1: str) -> bytearray
def __add__(self, _1: bytearray) -> bytearray
class `~unknown3`(object):
def join(self, _1: str) -> str
def join(self, _1: unicode) -> unicode
def join(self, _1: iterator) -> str
class `~unknown4`(object):
def append(self, _1: NoneType) -> NoneType
""")
self.assertItemsEqual(["float"], mapping["~unknown1"])
self.assertItemsEqual(["bytearray"], mapping["~unknown2"])
self.assertItemsEqual(["str"], mapping["~unknown3"])
self.assertItemsEqual(["list", "typing.MutableSequence", "typing.List"],
mapping["~unknown4"])
self.assertItemsEqual(["NoneType"], mapping["~unknown4.__builtin__.list.T"])
def test_union(self):
mapping = self.parse_and_solve("""
class `~unknown1`(object):
def __add__(self, _1: int or float) -> float
class `~unknown2`(object):
def __add__(self, _1: bytearray) -> bytearray
""")
self.assertItemsEqual(["float"], mapping["~unknown1"])
self.assertItemsEqual(["bytearray"], mapping["~unknown2"])
def test_containers(self):
mapping = self.parse_and_solve("""
class `~unknown1`(object):
def foo(self, x: list[bool]) -> int
class A(object):
def foo(self, x: list[int]) -> int
""")
self.assertItemsEqual(["A"], mapping["~unknown1"])
def test_type_parameters(self):
mapping = self.parse_and_solve("""
from typing import Generic
T = TypeVar('T')
class A(typing.Generic[T], object):
def foo(self) -> ?
def bar(self, x: T) -> ?
class `~unknown1`(object):
def foo(self) -> ?
def bar(self, x: int) -> ?
""")
self.assertItemsEqual(["A"], mapping["~unknown1"])
self.assertItemsEqual(["int"], mapping["~unknown1.A.T"])
def test_generic_against_generic(self):
mapping = self.parse_and_solve("""
class A():
def f(self, x: list[int]) -> ?
def g(self, x: list[float]) -> ?
class B():
def f(self, x: set[int]) -> ?
def g(self, x: list[int]) -> ?
class `~unknown1`(object):
def f(self, x: list[int]) -> ?
class `~unknown2`(object):
def g(self, x: list[int]) -> ?
""")
self.assertItemsEqual(["A"], mapping["~unknown1"])
self.assertItemsEqual(["B"], mapping["~unknown2"])
def test_unknown_against_generic(self):
mapping = self.parse_and_solve("""
def f(A: `~unknown0`) -> list[`~unknown8`]
class `~unknown0`():
def has_key(self, _1: ?) -> ?
def viewvalues(self) -> `~unknown2`
class `~unknown2`():
def __iter__(self) -> `~unknown4`
class `~unknown4`():
def next(self) -> `~unknown6`
class `~unknown6`():
def __sub__(self, _1: float) -> `~unknown8`
class `~unknown8`():
pass
""")
self.assertItemsEqual(["dict"], mapping["~unknown0"])
self.assertContainsSubset(["complex", "float"],
mapping["~unknown0.__builtin__.dict.V"])
self.assertItemsEqual(["dict_values"], mapping["~unknown2"])
self.assertItemsEqual(["dictionary-valueiterator"], mapping["~unknown4"])
self.assertContainsSubset(["complex", "float"], mapping["~unknown6"])
self.assertContainsSubset(["complex", "float"], mapping["~unknown8"])
def test_subclass_of_elements(self):
mapping = self.parse_and_solve("""
class A():
def f(self, x: list[int]) -> list[int]
class `~unknown1`(object):
def f(self, x: list[bool]) -> ?
class `~unknown2`(object):
def f(self, x: ?) -> list[object]
class `~unknown3`(object):
def f(self, x: list[object]) -> ?
class `~unknown4`(object):
def f(self, x: ?) -> list[bool]
""")
self.assertItemsEqual(["A"], mapping["~unknown1"])
self.assertItemsEqual([], mapping["~unknown2"])
self.assertItemsEqual([], mapping["~unknown3"])
self.assertItemsEqual(["A"], mapping["~unknown4"])
def test_subclass(self):
mapping = self.parse_and_solve("""
class A():
pass
class B(A):
pass
class AA(object):
def foo(self, x: A) -> A
class AB(object):
def foo(self, x: A) -> B
class BA(object):
def foo(self, x: B) -> A
class BB(object):
def foo(self, x: B) -> B
class `~unknown1`(object):
def foo(self, x: A) -> A
class `~unknown2`(object):
def foo(self, x: A) -> B
class `~unknown3`(object):
def foo(self, x: B) -> A
class `~unknown4`(object):
def foo(self, x: B) -> B
""")
self.assertItemsEqual(["AA"], mapping["~unknown1"])
self.assertItemsEqual(["AA", "AB"], mapping["~unknown2"])
self.assertItemsEqual(["AA", "BA"], mapping["~unknown3"])
self.assertItemsEqual(["AA", "AB", "BA", "BB"], mapping["~unknown4"])
def test_odd_superclass(self):
mapping = self.parse_and_solve("""
class A(nothing, nothing):
def foobar(self) -> ?
class B(?):
def foobar(self) -> ?
class C(A or B):
def foobar(self) -> ?
class D(list[int]):
def foobar(self) -> ?
T = TypeVar('T')
class E(typing.Generic[T], T):
def foobar(self) -> ?
class `~unknown1`(object):
def foobar(self) -> ?
""")
self.assertContainsSubset(["A", "B", "C", "D", "E"], mapping["~unknown1"])
@unittest.skip("not implemented")
def test_unknown_superclass(self):
# E.g. "class A(x): def foobar(self): pass" with (unknown) x = type(3)
mapping = self.parse_and_solve("""
class `~unknown1`(object):
def __add__(self, _1: int) -> int
class A(`~unknown1`):
def foobar(self) -> NoneType
class `~unknown2`(object):
def __add__(self, _1: int) -> int
def foobar(self) -> NoneType
""")
self.assertItemsEqual(["int", "bool"], mapping["~unknown1"])
self.assertItemsEqual(["A"], mapping["~unknown2"])
def test_nothing(self):
mapping = self.parse_and_solve("""
class A():
def f(self, x:nothing) -> nothing
class B():
def f(self, x:int) -> nothing
class C():
def f(self, x:nothing) -> int
class D():
def f(self, x:int) -> int
class `~unknown1`(object):
def f(self, x:nothing) -> nothing
class `~unknown2`(object):
def f(self, x:int) -> nothing
class `~unknown3`(object):
def f(self, x:nothing) -> int
class `~unknown4`(object):
def f(self, x:int) -> int
""")
self.assertItemsEqual(["A", "B", "C", "D"], mapping["~unknown1"])
self.assertItemsEqual(["B", "D"], mapping["~unknown2"])
self.assertItemsEqual(["C", "D"], mapping["~unknown3"])
self.assertItemsEqual(["D"], mapping["~unknown4"])
def test_unknown(self):
mapping = self.parse_and_solve("""
class A(?):
def f(self, x:?) -> ?
class B(?):
def f(self, x:int) -> ?
class C(?):
def f(self, x:?) -> int
class D(?):
def f(self, x:int) -> int
class `~unknown1`(object):
def f(self, x:?) -> ?
def f(self, x:int) -> ?
def f(self, x:?) -> int
def f(self, x:int) -> int
""")
convert_structural.log_info_mapping(mapping)
self.assertItemsEqual(["A", "B", "C", "D"], mapping["~unknown1"])
def test_union_left_right(self):
mapping = self.parse_and_solve("""
class A(object):
def f(self, x:int) -> int
class B(object):
def f(self, x:int) -> int or float
class C(object):
def f(self, x:int or float) -> int
class D(object):
def f(self, x:int or float) -> int or float
class `~unknown1`(object):
def f(self, x:int) -> int
class `~unknown2`(object):
def f(self, x:int or float) -> int
class `~unknown3`(object):
def f(self, x:int) -> int or float
""")
self.assertItemsEqual(["A", "B", "C", "D"], mapping["~unknown1"])
self.assertItemsEqual(["C", "D"], mapping["~unknown2"])
self.assertItemsEqual(["B", "D"], mapping["~unknown3"])
def test_different_lengths(self):
mapping = self.parse_and_solve("""
class A(object):
def f(self) -> ?
class B(object):
def f(self, x) -> ?
class C(object):
def f(self, x, y) -> ?
class `~unknown1`(object):
def f(self) -> ?
class `~unknown2`(object):
def f(self, x) -> ?
class `~unknown3`(object):
def f(self, x, y) -> ?
""")
self.assertItemsEqual(["A"], mapping["~unknown1"])
self.assertItemsEqual(["B"], mapping["~unknown2"])
self.assertItemsEqual(["C"], mapping["~unknown3"])
def test_filter(self):
mapping = self.parse_and_solve("""
class A(object):
def f(self, x: int or bytearray) -> ?
class `~unknown1`(object):
def f(self, _1: `~unknown2`) -> ?
class `~unknown2`(object):
def capitalize(self) -> ?
""")
self.assertItemsEqual(["A"], mapping["~unknown1"])
self.assertItemsEqual(["bytearray"], mapping["~unknown2"])
def test_partial(self):
mapping = self.parse_and_solve("""
class `~unknown1`(object):
pass
class `~__builtin__~bool`(object):
def __and__(self, _1: `~unknown1`) -> bool
def __and__(self, _1: `~unknown2`) -> bool
class `~unknown2`(object):
pass
""")
self.assertItemsEqual(["bool", "int"], mapping["~unknown1"])
self.assertItemsEqual(["bool", "int"], mapping["~unknown2"])
def test_optional_parameters(self):
mapping = self.parse_and_solve("""
class A(object):
def f(self, ...) -> ?
class B(object):
def f(self, x, ...) -> ?
class C(object):
def f(self, x, y, ...) -> ?
class `~unknown1`(object):
def f(self) -> ?
class `~unknown2`(object):
def f(self, x) -> ?
class `~unknown3`(object):
def f(self, x, y) -> ?
class `~unknown4`(object):
def f(self, x, y, z) -> ?
""")
self.assertItemsEqual(["A"], mapping["~unknown1"])
self.assertItemsEqual(["A", "B"], mapping["~unknown2"])
self.assertItemsEqual(["A", "B", "C"], mapping["~unknown3"])
def test_listiterator(self):
self.parse_and_solve("""
class `~unknown1`(object):
pass
class `~__builtin__~listiterator`(object):
def next(self) -> `~unknown1`
def next(self) -> tuple[nothing, ...]
""")
def test_enumerate(self):
self.parse_and_solve("""
class `~unknown1`(object):
pass
class `~__builtin__~enumerate`(object):
def __init__(self, iterable: list[`~unknown1`]) -> NoneType
def next(self) -> tuple[?, ...]
""")
def test_call_builtin(self):
mapping = self.parse_and_solve("""
class `~unknown1`(object):
pass
class `~unknown2`(object):
pass
def `~__builtin__~round`(number: `~unknown1`) -> `~unknown2`
""")
self.assertIn("float", mapping["~unknown1"])
self.assertNotIn("str", mapping["~unknown1"])
def test_fibonacci(self):
mapping = self.parse_and_solve("""
def fib(n: `~unknown4`) -> int or `~unknown12`
def fib(n: `~unknown8` or int) -> int
def foo(x: `~unknown1`) -> `~unknown3` or int
class `~__builtin__~int`(object): # TODO(kramm): Make pytype add the ~
def __eq__(self, y: int) -> `~unknown10` or bool
class `~unknown1`():
def __add__(self, _1: int) -> `~unknown3`
class `~unknown3`():
pass
class `~unknown4`():
def __eq__(self, _1: int) -> `~unknown6`
def __sub__(self, _1: int) -> `~unknown8`
def __mul__(self, _1: int) -> `~unknown12`
class `~unknown6`():
pass
class `~unknown8`():
def __eq__(self, _1: int) -> `~unknown10`
class `~unknown10`():
pass
class `~unknown12`():
pass
""")
self.assertItemsEqual(["int", "bool", "float", "complex"],
mapping["~unknown4"])
def test_add(self):
mapping = self.parse_and_solve("""
def f(self, x: `~unknown4`) -> `~unknown6`
class `~unknown4`():
def __add__(self, _1: int) -> `~unknown6`
class `~unknown6`():
pass
""")
# TODO(pludemann): remove "bool" from list when we do the
# more strict definition of return (that is, not allowing
# "bool" just because it's a subclass of "int" in __builtin__.pytd
numbers = ["int", "complex", "float", "bool"]
self.assertItemsEqual(numbers, mapping["~unknown4"])
self.assertItemsEqual(numbers, mapping["~unknown6"])
def test_subclasses(self):
mapping = self.parse_and_solve("""
class Foo(object):
def foo(self) -> Bar1
class Bar1(object):
def bar(self) -> complex
class Bar2(Bar1):
def bar(self) -> float
class `~unknown1`(object):
def foo(self) -> `~unknown2`
class `~unknown2`(object):
def bar(self) -> `~unknown3`
class `~unknown3`(object):
pass
""")
self.assertItemsEqual(["complex", "float"], mapping["~unknown3"])
def test_match_builtin_function(self):
mapping = self.parse_and_solve("""
def baz(int) -> float
def baz(complex) -> complex
def `~baz`(_1: `~unknown3`) -> `~unknown4`
class `~unknown3`(object):
pass
class `~unknown4`(object):
pass
""")
self.assertItemsEqual(["complex", "float"], mapping["~unknown4"])
def test_match_builtin_class(self):
mapping = self.parse_and_solve("""
class `~unknown1`(object):
pass
class `~unknown2`(object):
pass
T = TypeVar('T')
N = TypeVar('N')
class mylist(typing.Generic[T], object):
def __setitem__(self, i: int, y: N) -> NoneType:
self := mylist[T or N]
class `~mylist`():
def __setitem__(self, i: int, y: `~unknown2`) -> `~unknown1`
""")
self.assertItemsEqual(["NoneType"], mapping["~unknown1"])
def test_subclasses2(self):
mapping = self.parse_and_solve("""
class Foo(object):
def foo(self) -> Bar1
class Bar1(object):
def bar(self) -> Bar1
class Bar2(Bar1):
def bar(self) -> Bar2
def baz(x: Bar1) -> complex
def baz(x: Bar2) -> float
def `~baz`(x: `~unknown3`) -> `~unknown4`
class `~unknown1`(object):
def foo(self) -> `~unknown2`
class `~unknown2`(object):
def bar(self) -> `~unknown3`
class `~unknown3`(object):
pass
class `~unknown4`(object):
pass
""")
self.assertItemsEqual(["complex", "float"], mapping["~unknown4"])
def test_convert(self):
ast = self.parse("""
class A(object):
def foo(self, x: `~unknown1`) -> ?
def foobaz(self, x: int) -> int
class `~unknown1`(object):
def foobaz(self, x: int) -> int
""")
expected = textwrap.dedent("""
from typing import Any
class A(object):
def foo(self, x: A) -> Any: ...
def foobaz(self, x: int) -> int: ...
""").lstrip()
ast = convert_structural.convert_pytd(ast, self.builtins_pytd)
self.assertMultiLineEqual(pytd.Print(ast), expected)
def test_convert_with_type_params(self):
ast = self.parse("""
from typing import Dict
class A(object):
def foo(self, x: `~unknown1`) -> bool
class `~unknown1`():
def __setitem__(self, _1: str, _2: `~unknown2`) -> ?
def update(self, _1: NoneType or Dict[nothing, nothing]) -> ?
class `~unknown2`():
def append(self, _1:NoneType) -> NoneType
""")
ast = convert_structural.convert_pytd(ast, self.builtins_pytd)
x = ast.Lookup("A").Lookup("foo").signatures[0].params[1].type
self.assertIn("MutableSequence", pytd.Print(x))
def test_isinstance(self):
ast = self.parse("""
x = ... # type: `~unknown1`
def `~__builtin__~isinstance`(object: int, class_or_type_or_tuple: tuple[nothing, ...]) -> `~unknown1`
class `~unknown1`(object):
pass
""")
expected = textwrap.dedent("""
x = ... # type: bool
""").strip()
ast = convert_structural.convert_pytd(ast, self.builtins_pytd)
self.assertMultiLineEqual(pytd.Print(ast), expected)
def test_match_superclass(self):
mapping = self.parse_and_solve("""
class Base1():
def f(self, x:Base1) -> Base2
class Base2():
def g(self) -> Base1
class Foo(Base1, Base2):
pass
class `~unknown1`():
def f(self, x:Base1) -> Base2
""")
self.assertItemsEqual(["Foo", "Base1"], mapping["~unknown1"])
if __name__ == "__main__":
test_inference.main()
```
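The tests above all follow the same recipe: write a pyi fragment in which `~unknownN` classes describe structural constraints, resolve it against the builtins, and let `convert_structural.solve` propose concrete builtin classes for each unknown. Below is a condensed, hedged sketch of that flow outside the test harness; the pyi snippet is made up, and the calls simply mirror `parse_and_solve` above.
```python
# Condensed sketch of the parse-and-solve flow used by MatchTest above.
import textwrap

from pytype import convert_structural
from pytype.pyi import parser
from pytype.pytd.parse import builtins
from pytype.pytd.parse import visitors

src = textwrap.dedent("""
    class `~unknown1`(object):
      def __add__(self, _1: int) -> float
""")
ast = parser.parse_string(src)
ast = ast.Visit(visitors.LookupBuiltins(builtins.GetBuiltinsAndTyping()[0]))
ast = ast.Visit(visitors.NamedTypeToClassType())
ast = ast.Visit(visitors.AdjustTypeParameters())
types, _ = convert_structural.solve(
    ast, builtins_pytd=builtins.GetBuiltinsPyTD())
# As in test_float above, ~unknown1 should resolve to float.
print(types["~unknown1"])
```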
#### File: pytype/pytype/imports_map_loader.py
```python
import collections
import logging
import os
import shlex
import textwrap
log = logging.getLogger(__name__)
def _read_imports_map(options_info_path):
"""Read the imports_map file, fold duplicate entries into a multimap."""
if options_info_path is None:
return None
imports_multimap = collections.defaultdict(set)
with open(options_info_path) as fi:
for line in fi:
line = line.strip()
if line:
short_path, path = shlex.split(line)
short_path, _ = os.path.splitext(short_path) # drop extension
imports_multimap[short_path].add(path)
  # Sort the multimap. Items whose base name contains '#' (generated for
  # analysis results via --api) sort first, so we prefer them over others.
return {short_path: sorted(paths, key=os.path.basename)
for short_path, paths in imports_multimap.items()}
def _validate_map(imports_map, src_out):
"""Validate the imports map against the command line arguments.
  Note that main.py has ensured that all output files also exist, in case
  they're actually used for input, e.g. when there are multiple files being
  processed.
Args:
imports_map: The map returned by _read_imports_map.
src_out: The command line arguments - pairs of file, as specified on the
command line as "src:out".
Raises:
AssertionError: If we found an error in the imports map.
"""
# If pytype is processing multiple files that import each other, during the
# first pass, we don't have a .pyi for them yet, even though they might be
# mentioned in the imports_map. So fill them with temporary contents.
for src, output in src_out:
if output is None:
continue
if os.path.exists(output):
log.error("output file %r (from processing %r) already exists; "
"will be overwritten",
os.path.abspath(output), src)
with open(output, "w") as fi:
fi.write(textwrap.dedent("""\
# If you see this comment, it means pytype hasn't properly
# processed %r to %r.
from typing import Any
def __getattr__(name) -> Any: ...
""" % (src, output)))
# Now, validate the imports_map.
for short_path, paths in imports_map.items():
for path in paths:
if not os.path.exists(path):
log.error("imports_map file does not exist: %r (mapped from %r)",
path, short_path)
log.error("tree walk of files from '.' (%r):", os.path.abspath("."))
for dirpath, _, files in os.walk(".", followlinks=False):
log.error("... dir %r: %r", dirpath, files)
log.error("end tree walk of files from '.'")
raise AssertionError("bad import map")
def build_imports_map(options_info_path, src_out=None):
"""Create a file mapping from a .imports_info file.
Builds a dict of short_path to full name
(e.g. "path/to/file.py" =>
"$GENDIR/rulename~~pytype-gen/path_to_file.py~~pytype"
Args:
options_info_path: The file with the info (may be None, for do-nothing)
src_out: The src/output files from the command line. When validating the
imports_info, these outputs should *not* exist. (The check is only
      done if options_info_path is not None, because other build systems
might not ensure that output files are deleted before processing).
Returns:
Dict of .py short_path to list of .pytd path or None if no options_info_path
"""
imports_multimap = _read_imports_map(options_info_path)
  # Warn about every short path that has multiple mappings and keep only the
  # lexicographically first one.
for short_path, paths in imports_multimap.items():
if len(paths) > 1:
log.warn("Multiple files for %r => %r ignoring %r",
short_path, paths[0], paths[1:])
imports_map = {short_path: os.path.abspath(paths[0])
for short_path, paths in imports_multimap.items()}
if src_out is not None:
_validate_map(imports_multimap, src_out)
  # Add the potential directory nodes for adding "__init__", because some build
  # systems automatically create __init__.py in empty directories. These are
  # added as "<dir>/__init__" entries, mapped to the null device (i.e. an empty
  # file). See also load_pytd._import_file which also checks for an empty
  # directory and acts as if an empty __init__.py is there.
# TODO(pludemann): remove either this code or the code in pytd_load.
dir_paths = {}
for short_path, path in sorted(imports_map.items()):
dir_paths[short_path] = path
short_path_pieces = short_path.split(os.sep)
    # If we have a mapping file 'foo/bar/quux.py', then the pieces are ["foo",
# "bar", "quux"] and we want to add foo/__init__.py and foo/bar/__init__.py
for i in range(1, len(short_path_pieces)):
intermediate_dir_init = os.path.join(*(
short_path_pieces[:i] + ["__init__"]))
if (intermediate_dir_init not in imports_map and
intermediate_dir_init not in dir_paths):
log.warn("Created empty __init__ %r", intermediate_dir_init)
dir_paths[intermediate_dir_init] = os.devnull
return dir_paths
```
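As a quick illustration of the file format this loader expects, each line of the imports_info file carries a (possibly quoted) short path followed by the path it maps to. A rough, self-contained sketch follows; all file names below are invented for illustration only.
```python
# Hypothetical sketch: feed build_imports_map a tiny imports_info file.
# The short paths and output paths below are invented for illustration.
import tempfile

from pytype import imports_map_loader

with tempfile.NamedTemporaryFile(mode="w", suffix=".imports_info",
                                 delete=False) as f:
  f.write("foo/bar/quux.py /build/out/foo_bar_quux.pyi\n")
  f.write("foo/baz.py /build/out/foo_baz.pyi\n")
  info_path = f.name

# With src_out=None no validation is performed, so the mapped files do not
# need to exist for this demo.
imports_map = imports_map_loader.build_imports_map(info_path)
# Keys have their extension stripped, and "foo/__init__" / "foo/bar/__init__"
# entries are added, mapped to os.devnull.
print(sorted(imports_map))
```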
#### File: pytype/pytype/load_pytd_test.py
```python
import unittest
from pytype import config
from pytype import load_pytd
from pytype import utils
from pytype.pytd import pytd
import unittest
class ImportPathsTest(unittest.TestCase):
"""Tests for load_pytd.py."""
PYTHON_VERSION = (2, 7)
def setUp(self):
self.options = config.Options.create(python_version=self.PYTHON_VERSION)
def testBuiltinSys(self):
loader = load_pytd.Loader("base", self.options)
ast = loader.import_name("sys")
self.assertTrue(ast.Lookup("sys.exit"))
def testBasic(self):
with utils.Tempdir() as d:
d.create_file("path/to/some/module.pyi", "def foo(x:int) -> str")
self.options.tweak(pythonpath=[d.path])
loader = load_pytd.Loader("base", self.options)
ast = loader.import_name("path.to.some.module")
self.assertTrue(ast.Lookup("path.to.some.module.foo"))
def testPath(self):
with utils.Tempdir() as d1:
with utils.Tempdir() as d2:
d1.create_file("dir1/module1.pyi", "def foo1() -> str")
d2.create_file("dir2/module2.pyi", "def foo2() -> str")
self.options.tweak(pythonpath=[d1.path, d2.path])
loader = load_pytd.Loader("base", self.options)
module1 = loader.import_name("dir1.module1")
module2 = loader.import_name("dir2.module2")
self.assertTrue(module1.Lookup("dir1.module1.foo1"))
self.assertTrue(module2.Lookup("dir2.module2.foo2"))
def testInit(self):
with utils.Tempdir() as d1:
d1.create_file("baz/__init__.pyi", "x = ... # type: int")
self.options.tweak(pythonpath=[d1.path])
loader = load_pytd.Loader("base", self.options)
self.assertTrue(loader.import_name("baz").Lookup("baz.x"))
def testBuiltins(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", "x = ... # type: int")
self.options.tweak(pythonpath=[d.path])
loader = load_pytd.Loader("base", self.options)
mod = loader.import_name("foo")
self.assertEquals("__builtin__.int", mod.Lookup("foo.x").type.cls.name)
self.assertEquals("__builtin__.int", mod.Lookup("foo.x").type.name)
@unittest.skip("automatic creation of __init__ only works with imports_map")
def testNoInit(self):
with utils.Tempdir() as d:
d.create_directory("baz")
self.options.tweak(pythonpath=[d.path])
loader = load_pytd.Loader("base", self.options)
self.assertTrue(loader.import_name("baz"))
def testStdlib(self):
loader = load_pytd.Loader("base", self.options)
ast = loader.import_name("StringIO")
self.assertTrue(ast.Lookup("StringIO.StringIO"))
def testDeepDependency(self):
with utils.Tempdir() as d:
d.create_file("module1.pyi", "def get_bar() -> module2.Bar")
d.create_file("module2.pyi", "class Bar:\n pass")
self.options.tweak(pythonpath=[d.path])
loader = load_pytd.Loader("base", self.options)
module1 = loader.import_name("module1")
f, = module1.Lookup("module1.get_bar").signatures
self.assertEquals("module2.Bar", f.return_type.cls.name)
def testCircularDependency(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
def get_bar() -> bar.Bar
class Foo:
pass
""")
d.create_file("bar.pyi", """
def get_foo() -> foo.Foo
class Bar:
pass
""")
self.options.tweak(pythonpath=[d.path])
loader = load_pytd.Loader("base", self.options)
foo = loader.import_name("foo")
bar = loader.import_name("bar")
f1, = foo.Lookup("foo.get_bar").signatures
f2, = bar.Lookup("bar.get_foo").signatures
self.assertEquals("bar.Bar", f1.return_type.cls.name)
self.assertEquals("foo.Foo", f2.return_type.cls.name)
def testRelative(self):
with utils.Tempdir() as d:
d.create_file("__init__.pyi", "base = ... # type: ?")
d.create_file("path/__init__.pyi", "path = ... # type: ?")
d.create_file("path/to/__init__.pyi", "to = ... # type: ?")
d.create_file("path/to/some/__init__.pyi", "some = ... # type: ?")
d.create_file("path/to/some/module.pyi", "")
self.options.tweak(pythonpath=[d.path])
loader = load_pytd.Loader("path.to.some.module", self.options)
some = loader.import_relative(1)
to = loader.import_relative(2)
path = loader.import_relative(3)
# Python doesn't allow "...." here, so don't test import_relative(4).
self.assertTrue(some.Lookup("path.to.some.some"))
self.assertTrue(to.Lookup("path.to.to"))
self.assertTrue(path.Lookup("path.path"))
def testTypeShed(self):
loader = load_pytd.Loader("base", self.options)
self.assertTrue(loader.import_name("UserDict"))
def testResolveAlias(self):
with utils.Tempdir() as d:
d.create_file("module1.pyi", """
from typing import List
x = List[int]
""")
d.create_file("module2.pyi", """
def f() -> module1.x
""")
self.options.tweak(pythonpath=[d.path])
loader = load_pytd.Loader("base", self.options)
module2 = loader.import_name("module2")
f, = module2.Lookup("module2.f").signatures
self.assertEquals("List[int]", pytd.Print(f.return_type))
def testImportMapCongruence(self):
with utils.Tempdir() as d:
foo_path = d.create_file("foo.pyi", "class X: ...")
bar_path = d.create_file("bar.pyi", "X = ... # type: another.foo.X")
# Map the same pyi file under two module paths.
imports_map = {
"foo": foo_path,
"another/foo": foo_path,
"bar": bar_path,
"empty1": "/dev/null",
"empty2": "/dev/null",
}
# We cannot use tweak(imports_info=...) because that doesn't trigger
# post-processing and we need an imports_map for the loader.
self.options.imports_map = imports_map
loader = load_pytd.Loader("base", self.options)
normal = loader.import_name("foo")
self.assertEquals("foo", normal.name)
loader.import_name("bar") # check that we can resolve against another.foo
another = loader.import_name("another.foo")
# We do *not* treat foo.X and another.foo.X the same, because Python
# doesn't, either:
self.assertIsNot(normal, another)
self.assertTrue([c.name.startswith("foo")
for c in normal.classes])
self.assertTrue([c.name.startswith("another.foo")
for c in another.classes])
# Make sure that multiple modules using /dev/null are not treated as
# congruent.
empty1 = loader.import_name("empty1")
empty2 = loader.import_name("empty2")
self.assertIsNot(empty1, empty2)
self.assertEquals("empty1", empty1.name)
self.assertEquals("empty2", empty2.name)
if __name__ == "__main__":
unittest.main()
```
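Outside the test harness, the same three steps shown in these tests are all that is needed to resolve a module: create an Options object, construct a Loader, and import by dotted name. A minimal sketch based directly on the calls above:
```python
# Minimal sketch of the Loader usage exercised by ImportPathsTest above.
from pytype import config
from pytype import load_pytd

options = config.Options.create(python_version=(2, 7))
loader = load_pytd.Loader("base", options)

sys_ast = loader.import_name("sys")
print(sys_ast.Lookup("sys.exit"))  # the pytd definition of sys.exit
```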
#### File: pytype/pytype/mro.py
```python
from pytype import utils
from pytype.pytd import pytd
from pytype.pytd import utils as pytd_utils
def flattened_superclasses(cls):
"""Given a pytd.Class return a list of all superclasses.
Args:
cls: A pytd.Class object.
Returns:
A frozenset of all superclasses of the given class including itself and any
transitive superclasses.
"""
if isinstance(cls, pytd.ClassType):
cls = cls.cls
return frozenset([cls]) | frozenset(c
for base in cls.parents
for c in flattened_superclasses(base))
def compute_mro(c):
"""Compute the class precedence list (mro) according to C3.
This code is copied from the following URL with print statements removed.
https://www.python.org/download/releases/2.3/mro/
Args:
c: The Class to compute the MRO for. This needs to be an instance
with the members "mro" and "bases".
Returns:
A list of Class objects in Method Resolution Order.
"""
bases = utils.concat_lists(b.data for b in c.bases())
return tuple(pytd_utils.MROMerge([[c]] +
[list(base.mro) for base in bases] +
[list(bases)]))
```
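compute_mro implements the same C3 linearization that CPython uses for new-style classes, so the expected ordering for a hierarchy can be sanity-checked against Python's own __mro__. A plain-Python illustration (no pytype calls involved):
```python
# Plain-Python illustration of the C3 ordering that compute_mro reproduces
# for pytd class nodes.
class A(object):
  pass

class B(A):
  pass

class C(A):
  pass

class D(B, C):
  pass

print([cls.__name__ for cls in D.__mro__])  # ['D', 'B', 'C', 'A', 'object']
```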
#### File: pytype/pytd/explain_test.py
```python
from pytype.pytd import cfg as typegraph
from pytype.pytd import explain
import unittest
class ExplainTest(unittest.TestCase):
"""Test explanations."""
def setUp(self):
# n1------->n2
# | |
# v v
# n3------->n4
# [n2] x = a; y = a
# [n3] x = b; y = b
# [n4] z = x & y
self.p = typegraph.Program()
self.n1 = self.p.NewCFGNode("n1")
self.n2 = self.n1.ConnectNew("n2")
self.n3 = self.n1.ConnectNew("n3")
self.n4 = self.n2.ConnectNew("n4")
self.n3.ConnectTo(self.n4)
self.x = self.p.NewVariable()
self.y = self.p.NewVariable()
self.z = self.p.NewVariable()
self.w = self.p.NewVariable()
self.xa = self.x.AddBinding("a", source_set=[], where=self.n2)
self.ya = self.y.AddBinding("a", source_set=[], where=self.n2)
self.xb = self.x.AddBinding("b", source_set=[], where=self.n3)
self.yb = self.y.AddBinding("b", source_set=[], where=self.n3)
self.za = self.z.AddBinding(
"a", source_set=[self.xa, self.ya], where=self.n4)
self.zb = self.z.AddBinding(
"b", source_set=[self.xb, self.yb], where=self.n4)
self.zab = self.z.AddBinding("a&b")
self.zab.AddOrigin(source_set=[self.xa, self.yb], where=self.n4)
self.zab.AddOrigin(source_set=[self.xb, self.ya], where=self.n4)
self.p.entrypoint = self.n1
def testValid(self):
self.assertTrue(explain.Explain([self.xa, self.ya], self.n4))
def testBadApple(self):
# x = 'a' spoils y = 'b'
self.assertFalse(explain.Explain([self.xa, self.yb], self.n4))
def testConflicting(self):
self.assertFalse(explain.Explain([self.xa, self.xb], self.n4))
def testBadSources(self):
self.assertFalse(explain.Explain([self.zab], self.n4))
def testUnordered(self):
p = typegraph.Program()
n0 = p.NewCFGNode("n0")
n1 = n0.ConnectNew("n1")
x = p.NewVariable()
y = p.NewVariable()
x0 = x.AddBinding(0, [], n0)
x1 = x.AddBinding(1, [], n0)
x2 = x.AddBinding(2, [], n0)
y0 = y.AddBinding(0, [x0], n1)
y1 = y.AddBinding(1, [x1], n1)
y2 = y.AddBinding(2, [x2], n1)
p.entrypoint = n0
self.assertTrue(explain.Explain([x0], n0))
self.assertTrue(explain.Explain([x1], n0))
self.assertTrue(explain.Explain([x2], n0))
self.assertTrue(explain.Explain([y0], n1))
self.assertTrue(explain.Explain([y1], n1))
self.assertTrue(explain.Explain([y2], n1))
if __name__ == "__main__":
unittest.main()
```
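The pattern in these tests is always the same: build a typegraph.Program, connect CFG nodes, add variable bindings with their source sets, then ask explain.Explain whether a set of bindings can all be visible at a given node. A minimal sketch reusing exactly the API exercised above:
```python
# Minimal sketch of the CFG/explain API used by ExplainTest above.
from pytype.pytd import cfg as typegraph
from pytype.pytd import explain

p = typegraph.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")   # [n2] x = a; y = a
n3 = n1.ConnectNew("n3")   # [n3] x = b
x = p.NewVariable()
y = p.NewVariable()
xa = x.AddBinding("a", source_set=[], where=n2)
ya = y.AddBinding("a", source_set=[], where=n2)
xb = x.AddBinding("b", source_set=[], where=n3)
p.entrypoint = n1

print(explain.Explain([xa, ya], n2))  # True: both assigned on n2
print(explain.Explain([xa, xb], n2))  # False: conflicting bindings of x
```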
#### File: pytd/parse/builtins.py
```python
import cPickle
import os
import sys
from pytype.pyi import parser
from pytype.pytd import utils
from pytype.pytd.parse import visitors
def _FindBuiltinFile(name, extension=".pytd"):
return utils.GetPredefinedFile("builtins", name, extension)
def _FindStdlibFile(name, extension=".pytd"):
return utils.GetPredefinedFile("stdlib", name, extension)
# Keyed by the parameter(s) passed to GetBuiltinsPyTD:
_cached_builtins_pytd = None # ... => pytype.pytd.pytd.TypeDeclUnit
def Precompile(f):
"""Write precompiled builtins to the specified file."""
data = GetBuiltinsAndTyping()
# Pickling builtins tends to bump up against the recursion limit. Increase
# it temporarily here. If "RuntimeError: maximum recursion depth exceeded"
# is seen during pickling, this limit may need to be increased further.
old_limit = sys.getrecursionlimit()
sys.setrecursionlimit(20000)
cPickle.dump(data, f, protocol=2)
sys.setrecursionlimit(old_limit)
def LoadPrecompiled(f):
"""Load precompiled builtins from the specified f."""
global _cached_builtins_pytd
assert _cached_builtins_pytd is None
_cached_builtins_pytd = cPickle.load(f)
def GetBuiltinsAndTyping():
"""Get __builtin__.pytd and typing.pytd."""
global _cached_builtins_pytd
if not _cached_builtins_pytd:
t = parser.parse_string(_FindBuiltinFile("typing"), name="typing")
b = parser.parse_string(_FindBuiltinFile("__builtin__"),
name="__builtin__")
b = b.Visit(visitors.NamedTypeToClassType())
b = b.Visit(visitors.LookupExternalTypes({"typing": t}, full_names=True,
self_name="__builtin__"))
t = t.Visit(visitors.LookupBuiltins(b))
t = t.Visit(visitors.NamedTypeToClassType())
b = b.Visit(visitors.AdjustTypeParameters())
t = t.Visit(visitors.AdjustTypeParameters())
b.Visit(visitors.FillInModuleClasses({"": b, "typing": t,
"__builtin__": b}))
t.Visit(visitors.FillInModuleClasses({"": t, "typing": t,
"__builtin__": b}))
b.Visit(visitors.VerifyLookup())
t.Visit(visitors.VerifyLookup())
b.Visit(visitors.VerifyContainers())
t.Visit(visitors.VerifyContainers())
_cached_builtins_pytd = b, t
return _cached_builtins_pytd
def GetBuiltinsPyTD():
"""Get the "default" AST used to lookup built in types.
Get an AST for all Python builtins as well as the most commonly used standard
libraries.
Returns:
A pytd.TypeDeclUnit instance. It'll directly contain the builtin classes
and functions, and submodules for each of the standard library modules.
"""
return utils.Concat(*GetBuiltinsAndTyping())
# TODO(kramm): Use python_version, once we have builtins for both Python 2 and
# Python 3.
def GetBuiltinsCode(unused_python_version):
"""Similar to GetBuiltinsPyTD, but for code in the .py file."""
return _FindBuiltinFile("__builtin__", extension=".py")
def ParsePyTD(src=None, filename=None, python_version=None, module=None,
lookup_classes=False):
"""Parse pytd sourcecode and do name lookup for builtins.
This loads a pytd and also makes sure that all names are resolved (i.e.,
  that all primitive types in the AST are ClassType, and not NamedType).
Args:
src: PyTD source code.
filename: The filename the source code is from.
python_version: The Python version to parse the pytd for.
module: The name of the module we're parsing.
lookup_classes: If we should also lookup the class of every ClassType.
Returns:
A pytd.TypeDeclUnit.
"""
assert python_version
if src is None:
with open(filename, "rb") as fi:
src = fi.read()
ast = parser.parse_string(src, filename=filename, name=module,
python_version=python_version)
if lookup_classes:
ast = visitors.LookupClasses(ast, GetBuiltinsPyTD())
return ast
def ParsePredefinedPyTD(pytd_subdir, module, python_version):
"""Load and parse a *.pytd from "pytd/{pytd_subdir}/{module}.pytd".
Args:
pytd_subdir: the directory where the module should be found
module: the module name (without any file extension)
python_version: sys.version_info[:2]
Returns:
The AST of the module; None if the module doesn't exist in pytd_subdir.
"""
try:
src = utils.GetPredefinedFile(pytd_subdir, module)
except IOError:
return None
return ParsePyTD(src, filename=os.path.join(pytd_subdir, module + ".pytd"),
module=module,
python_version=python_version).Replace(name=module)
# pyi for a catch-all module
DEFAULT_SRC = """
from typing import Any
def __getattr__(name) -> Any: ...
"""
def GetDefaultAst(python_version):
return ParsePyTD(src=DEFAULT_SRC,
python_version=python_version, lookup_classes=True)
```
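GetBuiltinsPyTD and ParsePyTD are the usual entry points when a pyi snippet has to be parsed with builtin names resolved. A rough sketch; the one-line snippet and the module name are made up for illustration:
```python
# Rough sketch of parsing a small pyi snippet against the cached builtins.
from pytype.pytd import pytd
from pytype.pytd.parse import builtins

builtins_pytd = builtins.GetBuiltinsPyTD()   # builtins + typing, concatenated

ast = builtins.ParsePyTD(
    src="def add_one(x: int) -> int",  # hypothetical one-line pyi
    filename="<string>",               # not read, since src is given
    python_version=(2, 7),
    module="example",                  # hypothetical module name
    lookup_classes=True)               # resolve ClassType nodes via builtins
print(pytd.Print(ast))                 # prints the resolved pyi back out
```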
#### File: pytype/pytd/pytd_test.py
```python
import itertools
import textwrap
import unittest
from pytype.pyi import parser
from pytype.pytd import pytd
from pytype.pytd.parse import visitors
class TestPytd(unittest.TestCase):
"""Test the simple functionality in pytd.py."""
def setUp(self):
self.int = pytd.ClassType("int")
self.none_type = pytd.ClassType("NoneType")
self.float = pytd.ClassType("float")
self.list = pytd.ClassType("list")
def testUnionTypeEq(self):
u1 = pytd.UnionType((self.int, self.float))
u2 = pytd.UnionType((self.float, self.int))
self.assertEqual(u1, u2)
self.assertEqual(u2, u1)
self.assertEqual(u1.type_list, (self.int, self.float))
self.assertEqual(u2.type_list, (self.float, self.int))
def testUnionTypeNe(self):
u1 = pytd.UnionType((self.int, self.float))
u2 = pytd.UnionType((self.float, self.int, self.none_type))
self.assertNotEqual(u1, u2)
self.assertNotEqual(u2, u1)
self.assertEqual(u1.type_list, (self.int, self.float))
self.assertEqual(u2.type_list, (self.float, self.int, self.none_type))
def testOrder(self):
# pytd types' primary sort key is the class name, second sort key is
# the contents when interpreted as a (named)tuple.
nodes = [pytd.AnythingType(),
pytd.GenericType(self.list, (self.int,)),
pytd.NamedType("int"),
pytd.NothingType(),
pytd.UnionType((self.float,)),
pytd.UnionType((self.int,))]
for n1, n2 in zip(nodes[:-1], nodes[1:]):
self.assertLess(n1, n2)
self.assertLessEqual(n1, n2)
self.assertGreater(n2, n1)
self.assertGreaterEqual(n2, n1)
for p in itertools.permutations(nodes):
self.assertEquals(list(sorted(p)), nodes)
def testASTeq(self):
# This creates two ASts that are equivalent but whose sources are slightly
# different. The union types are different (int,str) vs (str,int) but the
# ordering is ignored when testing for equality (which ASTeq uses).
src1 = textwrap.dedent("""
def foo(a: int or str) -> C
T = TypeVar('T')
class C(typing.Generic[T], object):
def bar(x: T) -> NoneType
CONSTANT = ... # type: C[float]
""")
src2 = textwrap.dedent("""
CONSTANT = ... # type: C[float]
T = TypeVar('T')
class C(typing.Generic[T], object):
def bar(x: T) -> NoneType
def foo(a: str or int) -> C
""")
tree1 = parser.parse_string(src1)
tree2 = parser.parse_string(src2)
tree1.Visit(visitors.VerifyVisitor())
tree2.Visit(visitors.VerifyVisitor())
self.assertTrue(tree1.constants)
self.assertTrue(tree1.classes)
self.assertTrue(tree1.functions)
self.assertTrue(tree2.constants)
self.assertTrue(tree2.classes)
self.assertTrue(tree2.functions)
self.assertIsInstance(tree1, pytd.TypeDeclUnit)
self.assertIsInstance(tree2, pytd.TypeDeclUnit)
# For the ==, != tests, TypeDeclUnit uses identity
self.assertTrue(tree1 == tree1)
self.assertTrue(tree2 == tree2)
self.assertFalse(tree1 == tree2)
self.assertFalse(tree2 == tree1)
self.assertFalse(tree1 != tree1)
self.assertFalse(tree2 != tree2)
self.assertTrue(tree1 != tree2)
self.assertTrue(tree2 != tree1)
self.assertEquals(tree1, tree1)
self.assertEquals(tree2, tree2)
self.assertNotEquals(tree1, tree2)
self.assertTrue(tree1.ASTeq(tree2))
self.assertTrue(tree1.ASTeq(tree1))
self.assertTrue(tree2.ASTeq(tree1))
self.assertTrue(tree2.ASTeq(tree2))
if __name__ == "__main__":
unittest.main()
```
#### File: pytype/pytd/utils.py
```python
import collections
import os
from pytype.pyi import parser
from pytype.pytd import abc_hierarchy
from pytype.pytd import pytd
from pytype.pytd.parse import visitors
import pytype.utils
def UnpackUnion(t):
"""Return the type list for union type, or a list with the type itself."""
if isinstance(t, pytd.UnionType):
return t.type_list
else:
return [t]
def MakeClassOrContainerType(base_type, type_arguments, homogeneous):
"""If we have type params, build a generic type, a normal type otherwise."""
if homogeneous:
assert len(type_arguments) == 1
return pytd.HomogeneousContainerType(base_type, tuple(type_arguments))
elif base_type.name in ("__builtin__.tuple", "typing.Tuple"):
return pytd.TupleType(base_type, tuple(type_arguments))
elif not type_arguments:
return base_type
else:
return pytd.GenericType(base_type, tuple(type_arguments))
def Concat(*args, **kwargs):
"""Concatenate two or more pytd ASTs."""
assert all(isinstance(arg, pytd.TypeDeclUnit) for arg in args)
name = kwargs.get("name")
return pytd.TypeDeclUnit(
name=name or " + ".join(arg.name for arg in args),
constants=sum((arg.constants for arg in args), ()),
type_params=sum((arg.type_params for arg in args), ()),
classes=sum((arg.classes for arg in args), ()),
functions=sum((arg.functions for arg in args), ()),
aliases=sum((arg.aliases for arg in args), ()))
def JoinTypes(types):
"""Combine a list of types into a union type, if needed.
  A single type is returned as-is. Multiple types are wrapped in a UnionType.
  An empty list (or one containing only NothingType) yields NothingType.
Arguments:
types: A list of types. This list might contain other UnionTypes. If
so, they are flattened.
Returns:
A type that represents the union of the types passed in. Order is preserved.
"""
queue = collections.deque(types)
seen = set()
new_types = []
while queue:
t = queue.popleft()
if isinstance(t, pytd.UnionType):
queue.extendleft(reversed(t.type_list))
elif isinstance(t, pytd.NothingType):
pass
elif t not in seen:
new_types.append(t)
seen.add(t)
if len(new_types) == 1:
return new_types.pop()
elif any(isinstance(t, pytd.AnythingType) for t in new_types):
return pytd.AnythingType()
elif new_types:
return pytd.UnionType(tuple(new_types)) # tuple() to make unions hashable
else:
return pytd.NothingType()
# pylint: disable=invalid-name
def prevent_direct_instantiation(cls, *args, **kwargs):
"""Mix-in method for creating abstract (base) classes.
Use it like this to prevent instantiation of classes:
class Foo(object):
__new__ = prevent_direct_instantiation
This will apply to the class itself, not its subclasses, so it can be used to
create base classes that are abstract, but will become concrete once inherited
from.
Arguments:
cls: The class to instantiate, passed to __new__.
*args: Additional arguments, passed to __new__.
**kwargs: Additional keyword arguments, passed to __new__.
Returns:
A new instance.
Raises:
AssertionError: If something tried to instantiate the base class.
"""
new = cls.__dict__.get("__new__")
if getattr(new, "__func__", None) == prevent_direct_instantiation:
raise AssertionError("Can't instantiate %s directly" % cls.__name__)
return object.__new__(cls, *args, **kwargs)
def disabled_function(*unused_args, **unused_kwargs):
"""Disable a function.
Disable a previously defined function foo as follows:
foo = disabled_function
Any later calls to foo will raise an AssertionError. This is used, e.g.,
in cfg.Program to prevent the addition of more nodes after we have begun
solving the graph.
Raises:
AssertionError: If something tried to call the disabled function.
"""
raise AssertionError("Cannot call disabled function.")
class TypeMatcher(object):
"""Base class for modules that match types against each other.
  Maps pytd node types (<type1>, <type2>) to a method
  "match_<type1>_against_<type2>". So e.g. to write a matcher that compares
  Functions by name, you would write:
    class MyMatcher(TypeMatcher):
      def match_Function_against_Function(self, f1, f2):
        return f1.name == f2.name
"""
def default_match(self, t1, t2):
return t1 == t2
def match(self, t1, t2, *args, **kwargs):
name1 = t1.__class__.__name__
name2 = t2.__class__.__name__
f = getattr(self, "match_" + name1 + "_against_" + name2, None)
if f:
return f(t1, t2, *args, **kwargs)
else:
return self.default_match(t1, t2, *args, **kwargs)
def CanonicalOrdering(n, sort_signatures=False):
"""Convert a PYTD node to a canonical (sorted) ordering."""
# TODO(pludemann): use the original .py to decide the ordering rather
# than an arbitrary sort order
return n.Visit(
visitors.CanonicalOrderingVisitor(sort_signatures=sort_signatures))
def GetAllSubClasses(ast):
"""Compute a class->subclasses mapping.
Args:
ast: Parsed PYTD.
Returns:
A dictionary, mapping instances of pytd.TYPE (types) to lists of
pytd.Class (the derived classes).
"""
hierarchy = ast.Visit(visitors.ExtractSuperClasses())
hierarchy = {cls: [superclass for superclass in superclasses]
for cls, superclasses in hierarchy.items()}
return abc_hierarchy.Invert(hierarchy)
def Print(ast):
return ast.Visit(visitors.PrintVisitor())
def EmptyModule(name="<empty>"):
return pytd.TypeDeclUnit(
name, type_params=(), constants=(), classes=(), functions=(), aliases=())
def WrapTypeDeclUnit(name, items):
"""Given a list (classes, functions, etc.), wrap a pytd around them.
Args:
name: The name attribute of the resulting TypeDeclUnit.
items: A list of items. Can contain pytd.Class, pytd.Function and
pytd.Constant.
Returns:
A pytd.TypeDeclUnit.
Raises:
ValueError: In case of an invalid item in the list.
NameError: For name conflicts.
"""
functions = collections.OrderedDict()
classes = collections.OrderedDict()
constants = collections.defaultdict(TypeBuilder)
aliases = collections.OrderedDict()
for item in items:
if isinstance(item, pytd.Function):
if item.name in functions:
if item.kind != functions[item.name].kind:
raise ValueError("Can't combine %s and %s", item.kind,
functions[item.name].kind)
functions[item.name] = pytd.Function(
item.name, functions[item.name].signatures + item.signatures,
item.kind)
else:
functions[item.name] = item
elif isinstance(item, pytd.Class):
if item.name in classes:
raise NameError("Duplicate top level class: %r", item.name)
classes[item.name] = item
elif isinstance(item, pytd.Constant):
constants[item.name].add_type(item.type)
elif isinstance(item, pytd.Alias):
if item.name in aliases:
raise NameError("Duplicate top level alias or import: %r", item.name)
aliases[item.name] = item
else:
raise ValueError("Invalid top level pytd item: %r" % type(item))
_check_intersection(functions, classes, "function", "class")
_check_intersection(functions, constants, "functions", "constant")
_check_intersection(functions, aliases, "functions", "aliases")
_check_intersection(classes, constants, "class", "constant")
_check_intersection(classes, aliases, "class", "alias")
_check_intersection(constants, aliases, "constant", "alias")
return pytd.TypeDeclUnit(
name=name,
constants=tuple(
pytd.Constant(name, t.build())
for name, t in sorted(constants.items())),
type_params=tuple(),
classes=tuple(classes.values()),
functions=tuple(functions.values()),
aliases=tuple(aliases.values()))
def _check_intersection(items1, items2, name1, name2):
items = set(items1) & set(items2)
if items:
if len(items) == 1:
raise NameError("Top level identifier %r is both %s and %s" %
(list(items)[0], name1, name2))
max_items = 5 # an arbitrary value
if len(items) > max_items:
raise NameError("Top level identifiers %s, ... are both %s and %s" %
(", ".join(map(repr, sorted(items[:max_items]))), name1,
name2))
raise NameError("Top level identifiers %s are both %s and %s" %
(", ".join(map(repr, sorted(items))), name1, name2))
class TypeBuilder(object):
"""Utility class for building union types."""
def __init__(self):
self.union = pytd.NothingType()
def add_type(self, other):
"""Add a new pytd type to the types represented by this TypeBuilder."""
self.union = JoinTypes([self.union, other])
def build(self):
"""Get a union of all the types added so far."""
return self.union
def __nonzero__(self):
return not isinstance(self.union, pytd.NothingType)
def NamedOrClassType(name, cls):
"""Create Classtype / NamedType."""
if cls is None:
return pytd.NamedType(name)
else:
return pytd.ClassType(name, cls)
def NamedTypeWithModule(name, module=None):
"""Create NamedType, dotted if we have a module."""
if module is None:
return pytd.NamedType(name)
else:
return pytd.NamedType(module + "." + name)
class OrderedSet(collections.OrderedDict):
"""A simple ordered set."""
def __init__(self, iterable=None):
super(OrderedSet, self).__init__((item, None) for item in (iterable or []))
def add(self, item):
self[item] = None
def WrapsDict(member_name, writable=False, implement_len=False):
"""Returns a mixin class for wrapping a dictionary.
This can be used like this:
class MyClass(WrapsDict("inner_dict")):
def __init__(self):
self.inner_dict = {}
The resulting class will delegate all dictionary operations to inner_dict.
Args:
member_name: Name of the attribute that contains the wrapped dictionary.
writable: Whether to implement operations that modify the dict, like "del".
implement_len: Whether the parent class should have a __len__ method that
maps to the inner dictionary.
Returns:
A type.
"""
src = "if True:\n" # To allow the code below to be indented
src += """
class WrapsDict(object):
def __getitem__(self, key):
return self.{member_name}[key]
def get(self, key, default=None):
return self.{member_name}.get(key, default)
def __contains__(self, key):
return key in self.{member_name}
def has_key(self, key):
return self.{member_name}.has_key(key)
def copy(self):
return self.{member_name}.copy()
def __iter__(self):
return iter(self.{member_name})
def items(self):
return self.{member_name}.items()
def iteritems(self):
return self.{member_name}.iteritems()
def iterkeys(self):
return self.{member_name}.iterkeys()
def itervalues(self):
return self.{member_name}.itervalues()
def keys(self):
return self.{member_name}.keys()
def values(self):
return self.{member_name}.values()
def viewitems(self):
return self.{member_name}.viewitems()
def viewkeys(self):
return self.{member_name}.viewkeys()
def viewvalues(self):
return self.{member_name}.viewvalues()
""".format(member_name=member_name)
if writable:
src += """
def pop(self, key):
return self.{member_name}.pop(key)
def popitem(self):
return self.{member_name}.popitem()
def setdefault(self, key, value=None):
return self.{member_name}.setdefault(key, value)
def update(self, other_dict):
return self.{member_name}.update(other_dict)
def clear(self):
return self.{member_name}.clear()
def __setitem__(self, key, value):
self.{member_name}[key] = value
def __delitem__(self, key):
del self.{member_name}[key]
""".format(member_name=member_name)
if implement_len:
src += """
def __len__(self):
return len(self.{member_name})
""".format(member_name=member_name)
namespace = {}
exec src in namespace # pylint: disable=exec-used
return namespace["WrapsDict"]
def Dedup(seq):
"""Return a sequence in the same order, but with duplicates removed."""
seen = set()
result = []
for s in seq:
if s not in seen:
result.append(s)
seen.add(s)
return result
class MROError(Exception):
def __init__(self, seqs):
super(MROError, self).__init__()
self.mro_seqs = seqs
def MROMerge(input_seqs):
"""Merge a sequence of MROs into a single resulting MRO.
Args:
input_seqs: A sequence of MRO sequences.
Returns:
A single resulting MRO.
Raises:
MROError: If we discovered an illegal inheritance.
"""
seqs = [Dedup(s) for s in input_seqs]
try:
return visitors.MergeSequences(seqs)
except ValueError:
raise MROError(input_seqs)
def _GetClass(t, lookup_ast):
if t.cls:
return t.cls
if lookup_ast:
return lookup_ast.Lookup(t.name)
raise AttributeError("Class not found: %s" % t.name)
def _Degenerify(types):
return [t.base_type if isinstance(t, pytd.GenericType) else t for t in types]
def _ComputeMRO(t, mros, lookup_ast):
if isinstance(t, pytd.ClassType):
if t not in mros:
mros[t] = None
parent_mros = []
for parent in _GetClass(t, lookup_ast).parents:
if parent in mros:
if mros[parent] is None:
raise MROError([[t]])
else:
parent_mro = mros[parent]
else:
parent_mro = _ComputeMRO(parent, mros, lookup_ast)
parent_mros.append(parent_mro)
mros[t] = tuple(
MROMerge([[t]] + parent_mros + [_Degenerify(
_GetClass(t, lookup_ast).parents)]))
return mros[t]
elif isinstance(t, pytd.GenericType):
return _ComputeMRO(t.base_type, mros, lookup_ast)
else:
return [t]
def GetBasesInMRO(cls, lookup_ast=None):
"""Get the given class's bases in Python's method resolution order."""
mros = {}
parent_mros = []
for p in cls.parents:
parent_mros.append(_ComputeMRO(p, mros, lookup_ast))
return tuple(MROMerge(parent_mros + [_Degenerify(cls.parents)]))
def canonical_pyi(pyi):
ast = parser.parse_string(pyi)
ast = ast.Visit(visitors.ClassTypeToNamedType())
ast = ast.Visit(visitors.CanonicalOrderingVisitor(sort_signatures=True))
ast.Visit(visitors.VerifyVisitor())
return pytd.Print(ast)
def GetPredefinedFile(pytd_subdir, module, extension=".pytd"):
"""Get the contents of a predefined PyTD, typically with a file name *.pytd.
Arguments:
pytd_subdir: the directory, typically "builtins" or "stdlib"
    module: module name (e.g., "sys" or "__builtin__")
extension: either ".pytd" or ".py"
Returns:
The contents of the file
Raises:
IOError: if file not found
"""
path = os.path.join("pytd", pytd_subdir,
os.path.join(*module.split(".")) + extension)
return pytype.utils.load_pytype_file(path)
```
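Most of these helpers operate directly on pytd nodes, so they are easy to exercise in isolation. A small sketch of JoinTypes and UnpackUnion, mirroring the unit tests that follow:
```python
# Small sketch of JoinTypes/UnpackUnion on hand-built pytd nodes.
from pytype.pytd import pytd
from pytype.pytd import utils

a = pytd.NamedType("a")
b = pytd.NamedType("b")

joined = utils.JoinTypes([a, b, a])        # duplicates are dropped
print(isinstance(joined, pytd.UnionType))  # True
print(utils.UnpackUnion(joined))           # the union's members: a, b

print(utils.JoinTypes([a]) == a)                          # single type: as-is
print(isinstance(utils.JoinTypes([]), pytd.NothingType))  # empty: nothing
```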
#### File: pytype/pytd/utils_test.py
```python
import os
import textwrap
import unittest
from pytype.pyi import parser
from pytype.pytd import pytd
from pytype.pytd import utils
from pytype.pytd.parse import builtins
from pytype.pytd.parse import parser_test_base
from pytype.pytd.parse import visitors
class TestUtils(parser_test_base.ParserTest):
"""Test pytype.pytd.utils."""
def testUnpackUnion(self):
"""Test for UnpackUnion."""
ast = self.Parse("""
c1 = ... # type: int or float
c2 = ... # type: int
c3 = ... # type: list[int or float]""")
c1 = ast.Lookup("c1").type
c2 = ast.Lookup("c2").type
c3 = ast.Lookup("c3").type
self.assertItemsEqual(utils.UnpackUnion(c1), c1.type_list)
self.assertItemsEqual(utils.UnpackUnion(c2), [c2])
self.assertItemsEqual(utils.UnpackUnion(c3), [c3])
def testConcat(self):
"""Test for concatenating two pytd ASTs."""
ast1 = self.Parse("""
c1 = ... # type: int
def f1() -> int
class Class1(object):
pass
""")
ast2 = self.Parse("""
c2 = ... # type: int
def f2() -> int
class Class2(object):
pass
""")
expected = textwrap.dedent("""
c1 = ... # type: int
c2 = ... # type: int
def f1() -> int
def f2() -> int
class Class1(object):
pass
class Class2(object):
pass
""")
combined = utils.Concat(ast1, ast2)
self.AssertSourceEquals(combined, expected)
def testConcat3(self):
"""Test for concatenating three pytd ASTs."""
ast1 = self.Parse("""c1 = ... # type: int""")
ast2 = self.Parse("""c2 = ... # type: float""")
ast3 = self.Parse("""c3 = ... # type: bool""")
combined = utils.Concat(ast1, ast2, ast3)
expected = textwrap.dedent("""
c1 = ... # type: int
c2 = ... # type: float
c3 = ... # type: bool
""")
self.AssertSourceEquals(combined, expected)
def testConcatTypeParameters(self):
"""Test for concatenating ASTs with type parameters."""
ast1 = self.Parse("""T = TypeVar("T")""", name="__builtin__")
ast2 = self.Parse("""T = TypeVar("T")""")
combined = utils.Concat(ast1, ast2)
self.assertEquals(combined.Lookup("__builtin__.T"),
pytd.TypeParameter("T", scope="__builtin__"))
self.assertEquals(combined.Lookup("T"), pytd.TypeParameter("T", scope=None))
def testJoinTypes(self):
"""Test that JoinTypes() does recursive flattening."""
n1, n2, n3, n4, n5, n6 = [pytd.NamedType("n%d" % i) for i in xrange(6)]
# n1 or (n2 or (n3))
nested1 = pytd.UnionType((n1, pytd.UnionType((n2, pytd.UnionType((n3,))))))
# ((n4) or n5) or n6
nested2 = pytd.UnionType((pytd.UnionType((pytd.UnionType((n4,)), n5)), n6))
joined = utils.JoinTypes([nested1, nested2])
self.assertEquals(joined.type_list,
(n1, n2, n3, n4, n5, n6))
def testJoinSingleType(self):
"""Test that JoinTypes() returns single types as-is."""
a = pytd.NamedType("a")
self.assertEquals(utils.JoinTypes([a]), a)
self.assertEquals(utils.JoinTypes([a, a]), a)
def testJoinNothingType(self):
"""Test that JoinTypes() removes or collapses 'nothing'."""
a = pytd.NamedType("a")
nothing = pytd.NothingType()
self.assertEquals(utils.JoinTypes([a, nothing]), a)
self.assertEquals(utils.JoinTypes([nothing]), nothing)
self.assertEquals(utils.JoinTypes([nothing, nothing]), nothing)
def testJoinEmptyTypesToNothing(self):
"""Test that JoinTypes() simplifies empty unions to 'nothing'."""
self.assertIsInstance(utils.JoinTypes([]), pytd.NothingType)
def testJoinAnythingTypes(self):
"""Test that JoinTypes() simplifies unions containing '?'."""
types = [pytd.AnythingType(), pytd.NamedType("a")]
self.assertIsInstance(utils.JoinTypes(types), pytd.AnythingType)
def testTypeMatcher(self):
"""Test for the TypeMatcher class."""
class MyTypeMatcher(utils.TypeMatcher):
def default_match(self, t1, t2, mykeyword):
assert mykeyword == "foobar"
return t1 == t2
def match_Function_against_Function(self, f1, f2, mykeyword):
assert mykeyword == "foobar"
return all(self.match(sig1, sig2, mykeyword)
for sig1, sig2 in zip(f1.signatures, f2.signatures))
s1 = pytd.Signature((), None, None, pytd.NothingType(), (), ())
s2 = pytd.Signature((), None, None, pytd.AnythingType(), (), ())
self.assertTrue(MyTypeMatcher().match(
pytd.Function("f1", (s1, s2), pytd.METHOD),
pytd.Function("f2", (s1, s2), pytd.METHOD),
mykeyword="foobar"))
self.assertFalse(MyTypeMatcher().match(
pytd.Function("f1", (s1, s2), pytd.METHOD),
pytd.Function("f2", (s2, s2), pytd.METHOD),
mykeyword="foobar"))
def testPrint(self):
"""Smoketest for printing pytd."""
ast = self.Parse("""
c1 = ... # type: int
T = TypeVar('T')
class A(typing.Generic[T], object):
bar = ... # type: T
def foo(self, x: list[int], y: T) -> list[T] or float raises ValueError
X = TypeVar('X')
Y = TypeVar('Y')
def bar(x: X or Y) -> ?
""")
# TODO(kramm): Do more extensive testing.
utils.Print(ast)
def testNamedTypeWithModule(self):
"""Test NamedTypeWithModule()."""
self.assertEquals(utils.NamedTypeWithModule("name"), pytd.NamedType("name"))
self.assertEquals(utils.NamedTypeWithModule("name", None),
pytd.NamedType("name"))
self.assertEquals(utils.NamedTypeWithModule("name", "package"),
pytd.NamedType("package.name"))
def testOrderedSet(self):
ordered_set = utils.OrderedSet(n/2 for n in range(10))
ordered_set.add(-42)
ordered_set.add(3)
self.assertEquals(tuple(ordered_set), (0, 1, 2, 3, 4, -42))
def testWrapTypeDeclUnit(self):
"""Test WrapTypeDeclUnit."""
ast1 = self.Parse("""
c = ... # type: int
def f(x: int) -> int
def f(x: float) -> float
class A(object):
pass
""")
ast2 = self.Parse("""
c = ... # type: float
d = ... # type: int
def f(x: complex) -> complex
class B(object):
pass
""")
w = utils.WrapTypeDeclUnit(
"combined",
ast1.classes + ast1.functions + ast1.constants +
ast2.classes + ast2.functions + ast2.constants)
expected = textwrap.dedent("""
c = ... # type: int or float
d = ... # type: int
def f(x: int) -> int
def f(x: float) -> float
def f(x: complex) -> complex
class A(object):
pass
class B(object):
pass
""")
self.AssertSourceEquals(w, expected)
def testWrapsDict(self):
class A(utils.WrapsDict("m")):
pass
a = A()
a.m = {}
a.m = {"foo": 1, "bar": 2}
self.assertEquals(a.get("x", "baz"), "baz")
self.assertFalse("x" in a)
self.assertEquals(a.get("foo"), 1)
self.assertEquals(a["foo"], 1)
self.assertTrue(a.has_key("foo"))
self.assertTrue("foo" in a)
self.assertTrue("bar" in a)
self.assertEquals(a.copy(), a.m)
self.assertItemsEqual(iter(a), ["foo", "bar"])
self.assertItemsEqual(a.keys(), ["foo", "bar"])
self.assertItemsEqual(a.viewkeys(), ["foo", "bar"])
self.assertItemsEqual(a.iterkeys(), ["foo", "bar"])
self.assertItemsEqual(a.values(), [1, 2])
self.assertItemsEqual(a.viewvalues(), [1, 2])
self.assertItemsEqual(a.itervalues(), [1, 2])
self.assertItemsEqual(a.items(), [("foo", 1), ("bar", 2)])
self.assertItemsEqual(a.viewitems(), [("foo", 1), ("bar", 2)])
self.assertItemsEqual(a.iteritems(), [("foo", 1), ("bar", 2)])
self.assertFalse(hasattr(a, "popitem"))
def testWrapsWritableDict(self):
class A(utils.WrapsDict("m", writable=True)):
pass
a = A()
a.m = {}
a.m = {"foo": 1, "bar": 2}
self.assertTrue(a.has_key("foo"))
self.assertTrue(a.has_key("bar"))
del a["foo"]
a["bar"] = 3
self.assertFalse(a.has_key("foo"))
self.assertTrue(a.has_key("bar"))
value = a.pop("bar")
self.assertEquals(3, value)
self.assertFalse(a.has_key("bar"))
a["new"] = 7
item = a.popitem()
self.assertEquals(item, ("new", 7))
a["1"] = 1
a.setdefault("1", 11)
a.setdefault("2", 22)
self.assertEquals(a["1"], 1)
self.assertEquals(a["2"], 22)
a.update({"3": 33})
self.assertItemsEqual(a.items(), (("1", 1), ("2", 22), ("3", 33)))
a.clear()
self.assertItemsEqual(a.items(), ())
def testWrapsDictWithLength(self):
class A(utils.WrapsDict("m", implement_len=True)):
pass
a = A()
a.m = {x: x for x in range(42)}
self.assertEquals(42, len(a))
def testDedup(self):
self.assertEquals([], utils.Dedup([]))
self.assertEquals([1], utils.Dedup([1]))
self.assertEquals([1, 2], utils.Dedup([1, 2]))
self.assertEquals([1, 2], utils.Dedup([1, 2, 1]))
self.assertEquals([1, 2], utils.Dedup([1, 1, 2, 2]))
self.assertEquals([3, 2, 1], utils.Dedup([3, 2, 1, 3]))
def testMROMerge(self):
self.assertEquals([], utils.MROMerge([[], []]))
self.assertEquals([1], utils.MROMerge([[], [1]]))
self.assertEquals([1], utils.MROMerge([[1], []]))
self.assertEquals([1, 2], utils.MROMerge([[1], [2]]))
self.assertEquals([1, 2], utils.MROMerge([[1, 2], [2]]))
self.assertEquals([1, 2, 3, 4], utils.MROMerge([[1, 2, 3], [2, 4]]))
self.assertEquals([1, 2, 3], utils.MROMerge([[1, 2], [1, 2, 3]]))
self.assertEquals([1, 2], utils.MROMerge([[1, 1], [2, 2]]))
self.assertEquals([1, 2, 3, 4, 5, 6],
utils.MROMerge([[1, 3, 5], [2, 3, 4], [4, 5, 6]]))
self.assertEquals([1, 2, 3], utils.MROMerge([[1, 2, 1], [2, 3, 2]]))
def testGetBasesInMRO(self):
ast = parser.parse_string(textwrap.dedent("""
from typing import Generic, TypeVar
T = TypeVar("T")
class Foo(Generic[T]): pass
class Bar(Foo[int]): pass
"""))
b, t = builtins.GetBuiltinsAndTyping()
ast = ast.Visit(visitors.LookupExternalTypes(
{"__builtin__": b, "typing": t}, full_names=True))
ast = ast.Visit(visitors.NamedTypeToClassType())
mro = utils.GetBasesInMRO(ast.Lookup("Bar"), lookup_ast=ast)
self.assertListEqual(["Foo", "typing.Generic", "__builtin__.object"],
[t.name for t in mro])
def testBuiltinAlias(self):
src = "Number = int"
ast = parser.parse_string(src)
self.assertMultiLineEqual(utils.Print(ast), src)
def testTypingNameConflict1(self):
src = textwrap.dedent("""
import typing
x = ... # type: typing.List[str]
def List() -> None: ...
""")
ast = parser.parse_string(src)
self.assertMultiLineEqual(utils.Print(ast).strip("\n"), src.strip("\n"))
def testTypingNameConflict2(self):
ast = parser.parse_string(textwrap.dedent("""
import typing
from typing import Any
x = ... # type: typing.List[str]
class MyClass(object):
List = ... # type: Any
x = ... # type: typing.List[str]
"""))
expected = textwrap.dedent("""
import typing
from typing import Any, List
x = ... # type: List[str]
class MyClass(object):
List = ... # type: Any
x = ... # type: typing.List[str]
""")
self.assertMultiLineEqual(utils.Print(ast).strip("\n"),
expected.strip("\n"))
class TestDataFiles(parser_test_base.ParserTest):
"""Test utils.GetPredefinedFile()."""
def testGetPredefinedFileReturnsString(self):
# smoke test, only checks that it doesn't throw and the result is a string
self.assertIsInstance(
utils.GetPredefinedFile("builtins", "__builtin__"),
str)
def testGetPredefinedFileThrows(self):
# smoke test, only checks that it does throw
with self.assertRaisesRegexp(
IOError,
r"File not found|Resource not found|No such file or directory"):
utils.GetPredefinedFile("builtins", "-this-file-does-not-exist")
def testPytdBuiltin(self):
"""Verify 'import sys'."""
import_contents = utils.GetPredefinedFile("builtins", "sys")
with open(os.path.join(os.path.dirname(pytd.__file__),
"builtins", "sys.pytd"), "rb") as fi:
file_contents = fi.read()
self.assertMultiLineEqual(import_contents, file_contents)
if __name__ == "__main__":
unittest.main()
```
#### File: pytype/pytype/state_test.py
```python
from pytype import state
from pytype.pytd import cfg
import unittest
def source_summary(binding, **varnames):
"""A simple deterministic listing of source variables."""
clauses = []
name_map = {b.variable: name for name, b in varnames.items()}
for origin in binding.origins:
for sources in origin.source_sets:
bindings = ["%s=%s" % (name_map[b.variable], b.data) for b in sources]
clauses.append(" ".join(sorted(bindings)))
return " | ".join(sorted(clauses))
class FakeValue(object):
def __init__(self, name, true_compat, false_compat):
self._name = name
self._compatible = {
True: true_compat,
False: false_compat}
def compatible_with(self, logical_value):
return self._compatible[logical_value]
def __str__(self):
return self._name
ONLY_TRUE = FakeValue("T", True, False)
ONLY_FALSE = FakeValue("F", False, True)
AMBIGUOUS = FakeValue("?", True, True)
class ConditionTestBase(unittest.TestCase):
def setUp(self):
self._program = cfg.Program()
self._node = self._program.NewCFGNode("test")
def new_binding(self, value=AMBIGUOUS):
var = self._program.NewVariable()
return var.AddBinding(value)
def check_binding(self, expected, binding, **varnames):
self.assertEquals(1, len(binding.origins))
self.assertEquals(self._node, binding.origins[0].where)
self.assertEquals(expected, source_summary(binding, **varnames))
class ConditionTest(ConditionTestBase):
def test_no_parent(self):
x = self.new_binding()
y = self.new_binding()
z = self.new_binding()
c = state.Condition(self._node, [[x, y], [z]])
self.check_binding("x=? y=? | z=?", c.binding, x=x, y=y, z=z)
def test_parent_combination(self):
p = self.new_binding()
x = self.new_binding()
y = self.new_binding()
z = self.new_binding()
c = state.Condition(self._node, [[x, y], [z]])
self.check_binding("x=? y=? | z=?", c.binding,
p=p, x=x, y=y, z=z)
class SplitConditionTest(ConditionTestBase):
def test(self):
# Test that we split both sides and that everything gets passed through
# correctly. Don't worry about special cases within _restrict_condition
# since those are tested separately.
self.new_binding()
var = self._program.NewVariable()
var.AddBinding(ONLY_TRUE)
var.AddBinding(ONLY_FALSE)
var.AddBinding(AMBIGUOUS)
true_cond, false_cond = state.split_conditions(self._node, var)
self.check_binding("v=? | v=T", true_cond.binding,
v=var.bindings[0])
self.check_binding("v=? | v=F",
false_cond.binding,
v=var.bindings[0])
class RestrictConditionTest(ConditionTestBase):
def setUp(self):
super(RestrictConditionTest, self).setUp()
p = self.new_binding()
self._parent = state.Condition(self._node, [[p]])
def test_no_bindings(self):
c = state._restrict_condition(self._node, [], False)
self.assertIs(state.UNSATISFIABLE, c)
c = state._restrict_condition(self._node, [], True)
self.assertIs(state.UNSATISFIABLE, c)
def test_none_restricted(self):
x = self.new_binding()
y = self.new_binding()
state._restrict_condition(self._node, [x, y], False)
state._restrict_condition(self._node, [x, y], True)
def test_all_restricted(self):
x = self.new_binding(ONLY_FALSE)
y = self.new_binding(ONLY_FALSE)
c = state._restrict_condition(self._node, [x, y], True)
self.assertIs(state.UNSATISFIABLE, c)
def test_some_restricted_no_parent(self):
x = self.new_binding() # Can be true or false.
y = self.new_binding(ONLY_FALSE)
z = self.new_binding() # Can be true or false.
c = state._restrict_condition(self._node, [x, y, z], True)
self.check_binding("x=? | z=?", c.binding, x=x, y=y, z=z)
def test_some_restricted_with_parent(self):
x = self.new_binding() # Can be true or false.
y = self.new_binding(ONLY_FALSE)
z = self.new_binding() # Can be true or false.
c = state._restrict_condition(self._node, [x, y, z], True)
self.check_binding("x=? | z=?", c.binding,
x=x, y=y, z=z)
def test_restricted_to_dnf(self):
# DNF for a | (b & c)
a = self.new_binding()
b = self.new_binding()
c = self.new_binding()
dnf = [[a],
[b, c]]
x = self.new_binding() # Compatible with everything
y = self.new_binding(FakeValue("DNF", dnf, False)) # Reduce to dnf
cond = state._restrict_condition(self._node, [x, y], True)
self.check_binding("a=? | b=? c=? | x=?", cond.binding,
a=a, b=b, c=c, x=x, y=y)
if __name__ == "__main__":
unittest.main()
```
#### File: pytype/tests/test_calls.py
```python
from pytype import utils
from pytype.tests import test_inference
class CallsTest(test_inference.InferenceTest):
"""Tests for checking function calls."""
def testOptional(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x: int, y: int = ..., z: int = ...) -> int
""")
self.assertNoErrors("""\
import mod
mod.foo(1)
mod.foo(1, 2)
mod.foo(1, 2, 3)
""", pythonpath=[d.path])
def testMissing(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x, y) -> int
""")
_, errors = self.InferAndCheck("""\
import mod
mod.foo(1)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "missing-parameter")])
def testExtraneous(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x, y) -> int
""")
_, errors = self.InferAndCheck("""\
import mod
mod.foo(1, 2, 3)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "wrong-arg-count")])
def testMissingKwOnly(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x, y, *, z) -> int
""")
_, errors = self.InferAndCheck("""\
import mod
mod.foo(1, 2)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "missing-parameter", r"\bz\b")])
def testExtraKeyword(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x, y) -> int
""")
_, errors = self.InferAndCheck("""\
import mod
mod.foo(1, 2, z=3)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "wrong-keyword-args")])
if __name__ == "__main__":
test_inference.main()
```
#### File: pytype/tests/test_inheritance.py
```python
import unittest
from pytype.pytd import pytd
from pytype.tests import test_inference
class InheritanceTest(test_inference.InferenceTest):
"""Tests for class inheritance."""
@unittest.skip("needs (re-)analyzing methods on subclasses")
def testSubclassAttributes(self):
ty = self.Infer("""
class Base(object):
def get_lineno(self):
return self.lineno
class Leaf(Base):
lineno = 0
""", deep=True, solve_unknowns=False, show_library_calls=True)
self.assertTypesMatchPytd(ty, """
class Base:
pass
class Leaf(Base):
lineno: int
def get_lineno(self) -> int
""")
def testClassAttributes(self):
ty = self.Infer("""
class A(object):
pass
class B(A):
pass
A.x = 3
A.y = 3
B.x = "foo"
def ax():
return A.x
def bx():
return B.x
def ay():
return A.y
def by():
return A.y
""", deep=True, solve_unknowns=False, show_library_calls=True)
self.assertOnlyHasReturnType(ty.Lookup("ax"), self.int)
self.assertOnlyHasReturnType(ty.Lookup("bx"), self.str)
self.assertOnlyHasReturnType(ty.Lookup("ay"), self.int)
self.assertOnlyHasReturnType(ty.Lookup("by"), self.int)
def testMultipleInheritance(self):
ty = self.Infer("""
class A(object):
x = 1
class B(A):
y = 4
class C(A):
y = "str"
z = 3j
class D(B, C):
pass
def x():
return D.x
def y():
return D.y
def z():
return D.z
""", deep=True, solve_unknowns=False, show_library_calls=True)
self.assertOnlyHasReturnType(ty.Lookup("x"), self.int)
self.assertOnlyHasReturnType(ty.Lookup("y"), self.int)
self.assertOnlyHasReturnType(ty.Lookup("z"), self.complex)
@unittest.skip("Needs type parameters on inherited classes.")
def testInheritFromBuiltins(self):
ty = self.Infer("""
class MyDict(dict):
def __init__(self):
dict.__setitem__(self, "abc", "foo")
def f():
        return MyDict()
""", deep=False, solve_unknowns=False, show_library_calls=True)
mydict = ty.Lookup("MyDict")
self.assertOnlyHasReturnType(ty.Lookup("f"),
pytd.ClassType("MyDict", mydict))
def testInheritMethodsFromObject(self):
# Test that even in the presence of multi-level inheritance,
# we can still see attributes from "object".
ty = self.Infer("""
class A(object):
pass
class B(A):
pass
def f():
return A().__sizeof__()
def g():
return B().__sizeof__()
def h():
return "bla".__sizeof__()
f(); g(); h()
""", deep=False, solve_unknowns=False, show_library_calls=True)
self.assertOnlyHasReturnType(ty.Lookup("f"), self.int)
self.assertOnlyHasReturnType(ty.Lookup("g"), self.int)
self.assertOnlyHasReturnType(ty.Lookup("h"), self.int)
def testMRO(self):
ty = self.Infer("""
class A(object):
def a(self):
return 1
class B(A):
def b(self):
return 1.0
class C(A):
def b(self):
# ignored in D, B.b has precedence
return "foo"
class D(B, C):
pass
def f():
return A().a()
def g():
return B().b()
def h():
return C().b()
def i():
return D().b()
""", deep=True, solve_unknowns=False, show_library_calls=True)
self.assertOnlyHasReturnType(ty.Lookup("f"), self.int)
self.assertOnlyHasReturnType(ty.Lookup("g"), self.float)
self.assertOnlyHasReturnType(ty.Lookup("h"), self.str)
self.assertOnlyHasReturnType(ty.Lookup("i"), self.float)
if __name__ == "__main__":
test_inference.main()
```
#### File: pytype/tests/test_pyi.py
```python
from pytype import utils
from pytype.tests import test_inference
class PYITest(test_inference.InferenceTest):
"""Tests for PYI."""
def testModuleParameter(self):
"""This test that types.ModuleType works."""
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
import types
def f(x: types.ModuleType = ...) -> None
""")
self.assertNoErrors("""
import os
import mod
mod.f(os)
""", pythonpath=[d.path])
def testOptional(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def f(x: int = ...) -> None
""")
ty = self.Infer("""\
import mod
def f():
return mod.f()
def g():
return mod.f(3)
""", deep=True, solve_unknowns=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
mod = ... # type: module
def f() -> NoneType
def g() -> NoneType
""")
def testSolve(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def f(node: int, *args, **kwargs) -> str
""")
ty = self.Infer("""\
import mod
def g(x):
return mod.f(x)
""", deep=True, solve_unknowns=True, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
mod = ... # type: module
def g(x: int) -> str
""")
def testTyping(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
from typing import Any, IO, List, Optional
def split(s: Optional[int]) -> List[str, ...]: ...
""")
ty = self.Infer("""\
import mod
def g(x):
return mod.split(x)
""", deep=True, solve_unknowns=True, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import List
mod = ... # type: module
def g(x: NoneType or int) -> List[str, ...]
""")
def testClasses(self):
with utils.Tempdir() as d:
d.create_file("classes.pyi", """
class A(object):
def foo(self) -> A
class B(A):
pass
""")
ty = self.Infer("""\
import classes
x = classes.B().foo()
""", deep=False, solve_unknowns=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
classes = ... # type: module
x = ... # type: classes.A
""")
def testEmptyModule(self):
with utils.Tempdir() as d:
d.create_file("vague.pyi", """
from typing import Any
def __getattr__(name) -> Any
""")
ty = self.Infer("""\
import vague
x = vague.foo + vague.bar
""", deep=False, solve_unknowns=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any
vague = ... # type: module
x = ... # type: Any
""")
def testDecorators(self):
with utils.Tempdir() as d:
d.create_file("decorated.pyi", """
class A(object):
@staticmethod
def u(a, b) -> int: ...
@classmethod
def v(cls, a, b) -> int: ...
def w(self, a, b) -> int: ...
""")
ty = self.Infer("""\
import decorated
u = decorated.A.u(1, 2)
v = decorated.A.v(1, 2)
a = decorated.A()
x = a.u(1, 2)
y = a.v(1, 2)
z = a.w(1, 2)
""", deep=False, solve_unknowns=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
decorated = ... # type: module
a = ... # type: decorated.A
u = ... # type: int
v = ... # type: int
x = ... # type: int
y = ... # type: int
z = ... # type: int
""")
def testPassPyiClassmethod(self):
with utils.Tempdir() as d:
d.create_file("a.pyi", """
class A(object):
@classmethod
def v(cls) -> float: ...
def w(self, x: classmethod) -> int: ...
""")
ty = self.Infer("""\
import a
u = a.A().w(a.A.v)
""", deep=False, solve_unknowns=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
a = ... # type: module
u = ... # type: int
""")
def testOptionalParameters(self):
with utils.Tempdir() as d:
d.create_file("a.pyi", """
def parse(source, filename = ..., mode = ..., *args, **kwargs) -> int: ...
""")
ty = self.Infer("""\
import a
u = a.parse("True")
""", deep=False, solve_unknowns=True, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
a = ... # type: module
u = ... # type: int
""")
def testOptimize(self):
with utils.Tempdir() as d:
d.create_file("a.pyi", """
class Bar(dict[?, int]): ...
""")
ty = self.Infer("""\
import a
def f(foo, bar):
return __any_object__[1]
def g():
out = f('foo', 'bar')
out = out.split()
""", deep=True, solve_unknowns=True, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any
a = ... # type: module
def f(foo, bar) -> Any
def g() -> NoneType: ...
""")
def testIterable(self):
with utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Iterable
def f(l: Iterable[int]) -> int: ...
""")
ty = self.Infer("""\
import a
u = a.f([1, 2, 3])
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
a = ... # type: module
u = ... # type: int
""")
def testObject(self):
with utils.Tempdir() as d:
d.create_file("a.pyi", """
def make_object() -> object
""")
ty = self.Infer("""\
import a
def f(x=None):
x = a.make_object()
z = x - __any_object__ # type: ignore
z + __any_object__
return True
""", deep=True, pythonpath=[d.path], solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
a = ... # type: module
def f(x=...) -> bool: ...
""")
def testCallable(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Any
from typing import Callable
def process_function(func: Callable[..., Any]) -> None: ...
""")
ty = self.Infer("""\
import foo
def bar():
pass
x = foo.process_function(bar)
""", deep=False, pythonpath=[d.path], solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import Any
foo = ... # type: module
def bar() -> Any: ... # 'Any' because deep=False
x = ... # type: NoneType
""")
def testHex(self):
ty = self.Infer("""\
x = hex(4)
""", deep=False, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
x = ... # type: str
""")
def testBaseClass(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Generic, TypeVar
S = TypeVar('S')
T = TypeVar('T')
class A(Generic[S]):
def bar(self, s: S) -> S: ...
class B(Generic[T], A[T]): ...
class C(A[int]): ...
class D(object):
def baz(self) -> int
""")
ty = self.Infer("""\
import foo
def f(x):
return x.bar("foo")
def g(x):
return x.bar(3)
def h(x):
return x.baz()
""", deep=True, pythonpath=[d.path], solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import Union
foo = ... # type: module
def f(x: Union[foo.A[str], foo.B[str]]) -> str
def g(x: Union[foo.A[int], foo.B[int], foo.C]) -> int
def h(x: foo.D) -> int
""")
def testAnonymousProperty(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
class Foo:
x = ... # type: property
""")
ty = self.Infer("""\
import foo
x = foo.Foo().x
x.bar()
""", deep=True, pythonpath=[d.path], solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
foo = ... # type: module
x = ... # type: ?
""")
def testOldStyleClassObjectMatch(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Any
def f(x) -> Any
class Foo: pass
""")
ty = self.Infer("""
import foo
def g():
return foo.f(foo.Foo())
""", deep=True, pythonpath=[d.path], solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import Any
foo = ... # type: module
def g() -> Any
""")
def testBytes(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
def f() -> bytes
""")
ty = self.Infer("""
import foo
x = foo.f()
""", deep=True, pythonpath=[d.path], solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
foo = ... # type: module
x = ... # type: str
""")
def testIdentity(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import TypeVar
T = TypeVar("T")
def f(x: T) -> T
""")
ty = self.Infer("""\
import foo
x = foo.f(3)
""", deep=True, pythonpath=[d.path], solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
foo = ... # type: module
x = ... # type: int
""")
def testImportFunctionTemplate(self):
with utils.Tempdir() as d1:
d1.create_file("foo.pyi", """
from typing import TypeVar
T = TypeVar("T")
def f(x: T) -> T
""")
with utils.Tempdir() as d2:
d2.create_file("bar.pyi", """
import foo
f = foo.f
""")
ty = self.Infer("""
import bar
x = bar.f("")
""", pythonpath=[d1.path, d2.path], deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
bar = ... # type: module
x = ... # type: str
""")
def testMultipleGetAttr(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Any
def __getattr__(name) -> Any
""")
ty, errors = self.InferAndCheck("""
from foo import *
from bar import * # Nonsense import generates a top-level __getattr__
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any
def __getattr__(name) -> Any
""")
self.assertErrorLogIs(errors, [(3, "import-error", r"bar")])
def testPyiListItem(self):
with utils.Tempdir() as d:
d.create_file("a.pyi", """
lst = ... # type: list
def f(x: int) -> str
""")
ty = self.Infer("""
import a
x = a.f(a.lst[0])
""", pythonpath=[d.path], deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
a = ... # type: module
x = ... # type: str
""")
def testSketchyFunctionReference(self):
with utils.Tempdir() as d:
# TODO(kramm): visitors._ToType() currently allows this. Should it?
d.create_file("a.pyi", """
def SketchyType() -> None
x = ... # type: SketchyType
""")
ty = self.Infer("""\
import a
x = a.x
""", deep=True, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
a = ... # type: module
def x() -> None: ...
""")
def testKeywordOnlyArgs(self):
with utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Any
def foo(x: str, *y: Any, z: complex = ...) -> int: ...
""")
ty = self.Infer("""\
import a
x = a.foo("foo %d %d", 3, 3)
""", deep=True, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
a = ... # type: module
x = ... # type: int
""")
def testSignature(self):
with utils.Tempdir() as d:
d.create_file("a.pyi", """
T = TypeVar("T")
def get_pos(x: T, *args: int, z: int, **kws: int) -> T: ...
def get_kwonly(x: int, *args: int, z: T, **kws: int) -> T: ...
def get_varargs(x: int, *args: T, z: int, **kws: int) -> T: ...
def get_kwargs(x: int, *args: int, z: int, **kws: T) -> T: ...
""")
ty = self.Infer("""\
import a
k = a.get_pos("foo", 3, 4, z=5)
l = a.get_kwonly(3, 4, z=5j)
m = a.get_varargs(1, *[1j, "foo"], z=3)
n = a.get_kwargs(1, **dict())
o = a.get_varargs(1, 2j, "foo", z=5)
p = a.get_kwargs(1, 2, 3, z=5, u=3j)
""", deep=True, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any
a = ... # type: module
k = ... # type: str
l = ... # type: complex
# TODO(kramm): Fix call_function_from_stack. The below should be:
# m = ... # type: Union[complex, str]
# n = ... # type: complex
# o = ... # type: Union[complex, str]
# p = ... # type: complex
m = ... # type: Any
n = ... # type: Any
o = ... # type: Any
p = ... # type: Any
""")
def testStarArgs(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Dict, TypeVar
K = TypeVar("K")
V = TypeVar("V")
def foo(a: K, *b, c: V, **d) -> Dict[K, V]: ...
""")
ty, errors = self.InferAndCheck("""\
import foo
a = foo.foo(*tuple(), **dict())
b = foo.foo(*(1,), **{"c": 3j})
c = foo.foo(*(1,))
d = foo.foo(*(), **{"d": 3j})
""", solve_unknowns=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any, Dict
foo = ... # type: module
a = ... # type: dict
b = ... # type: Dict[int, complex]
c = ... # type: Any
d = ... # type: Any
""")
self.assertErrorLogIs(errors, [
(4, "missing-parameter", r"\bc\b"),
(5, "missing-parameter", r"\ba\b"),
])
def testUnionWithSuperclass(self):
with utils.Tempdir() as d:
d.create_file("a.pyi", """
class A1(): pass
class A2(A1): pass
class A3(A2): pass
""")
ty = self.Infer("""
import a
def f(x):
# Constrain the type of x so it doesn't pull everything into our pytd
x = x + 16
if x:
return a.A1()
else:
return a.A3()
""", pythonpath=[d.path], deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
a = ... # type: module
def f(x: complex or float or int) -> a.A1
""")
def testBuiltinsModule(self):
with utils.Tempdir() as d:
d.create_file("a.pyi", """
import __builtin__
x = ... # type: __builtin__.int
""")
ty = self.Infer("""
import a
x = a.x
""", pythonpath=[d.path], deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
a = ... # type: module
x = ... # type: int
""")
def testFrozenSet(self):
with utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Any, FrozenSet, Set
x = ... # type: FrozenSet[str]
y = ... # type: Set[Any]
""")
ty = self.Infer("""
import a
x = a.x - a.x
y = a.x - a.y
""", pythonpath=[d.path], solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import FrozenSet
a = ... # type: module
x = ... # type: FrozenSet[str]
y = ... # type: FrozenSet[str]
""")
if __name__ == "__main__":
test_inference.main()
```
#### File: pytype/tests/test_recovery.py
```python
from pytype.tests import test_inference
class RecoveryTests(test_inference.InferenceTest):
"""Tests for recovering after errors.
The type inferencer can warn about bad code, but it should never blow up.
These tests check that we don't faceplant when we encounter difficult code.
"""
def testBadSubtract(self):
ty = self.Infer("""
def f():
t = 0.0
return t - ("bla" - t)
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
def f() -> ?
""")
def testBadCall(self):
ty = self.Infer("""
def f():
return "%s" % chr("foo")
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
def f() -> str
""")
def testBadFunction(self):
ty = self.Infer("""
import time
def f():
return time.unknown_function(3)
def g():
return '%s' % f()
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
time = ... # type: module
def f() -> ?
def g() -> str
""")
def testInheritFromInstance(self):
ty = self.Infer("""
class Foo(3):
pass
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
class Foo(?):
pass
""")
def testNameError(self):
ty = self.Infer("""
x = foobar
class A(x):
pass
pow(A(), 2)
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
x = ... # type: ?
class A(?):
pass
""")
def testObjectAttr(self):
self.assertNoCrash("""
object.bla(int)
""")
def testAttrError(self):
ty = self.Infer("""
class A:
pass
x = A.x
class B:
pass
y = "foo".foo()
object.bar(int)
class C:
pass
""", deep=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
class A:
pass
x = ... # type: ?
class B:
pass
y = ... # type: ?
class C:
pass
""")
def testNoSelf(self):
ty = self.Infer("""
class Foo(object):
def foo():
pass
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
class Foo(object):
def foo(): ...
""")
def testWrongCall(self):
ty = self.Infer("""
def f():
pass
f("foo")
x = 3
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
def f() -> None: ...
x = ... # type: int
""")
def testDuplicateIdentifier(self):
ty = self.Infer("""
class A(object):
def __init__(self):
self.foo = 3
def foo(self):
pass
""", deep=True)
self.assertTypesMatchPytd(ty, """
from typing import Any
class A(object):
foo = ... # type: Any
""")
if __name__ == "__main__":
test_inference.main()
```
#### File: pytype/tests/test_solver.py
```python
import unittest
from pytype import utils
from pytype.tests import test_inference
class SolverTests(test_inference.InferenceTest):
"""Tests for type inference that also runs convert_structural.py."""
def testAmbiguousAttr(self):
ty = self.Infer("""
class Node(object):
children = ()
def __init__(self):
self.children = []
for ch in self.children:
pass
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import List, Tuple
class Node(object):
children = ... # type: List[nothing, ...] or Tuple[nothing, ...]
""")
def testCall(self):
ty = self.Infer("""
def f():
x = __any_object__
y = x.foo
z = y()
eval(y)
return z
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
def f() -> ?
""")
def testTypeParameters(self):
ty = self.Infer("""
def f(A):
A.has_key("foo")
return [a - 42.0 for a in A.viewvalues()]
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import List
def f(A: dict[?, float or complex or int]) -> List[float or complex, ...]
""")
def testAnythingTypeParameters(self):
ty = self.Infer("""
def f(x):
return x.keys()
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import Mapping
def f(x: Mapping) -> list
""")
@unittest.skip("Infers x as Any because dict params are nothing")
def testNothingTypeParameters(self):
ty = self.Infer("""
def f(x):
x[""] = dict()
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
def f(x: Dict[str, dict]) -> None
""")
def testNameConflict(self):
ty = self.Infer("""
import StringIO
class Foobar(object):
def foobar(self, out):
out.write('')
class Barbaz(object):
def barbaz(self):
__any_object__.foobar(StringIO.StringIO())
""", deep=True, solve_unknowns=True)
# TODO(rechen): Both StringIO[str] and BinaryIO are subclasses of IO[str],
# which therefore should be optimized away.
self.assertTypesMatchPytd(ty, """
from typing import BinaryIO, IO
StringIO = ... # type: module
class Foobar(object):
def foobar(self, out: StringIO.StringIO[str] or BinaryIO or IO[str]) -> NoneType
class Barbaz(object):
def barbaz(self) -> NoneType
""")
def testTopLevelClass(self):
ty = self.Infer("""
import Foo # bad import
class Bar(Foo):
pass
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
Foo = ... # type: ?
class Bar(?):
pass
""")
def testDictWithNothing(self):
ty = self.Infer("""
def f():
d = {}
d[1] = "foo"
for name in d:
len(name)
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
def f() -> NoneType
""")
def testOptionalParams(self):
ty = self.Infer("""
class Foo(object):
def __init__(self, *types):
self.types = types
def bar(self, val):
return issubclass(val, self.types)
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import Tuple
class Foo(object):
def __init__(self, *types) -> NoneType
types = ... # type: Tuple[type, ...]
def bar(self, val) -> bool
""")
@unittest.skip("isinstance() doesn't record a type signature")
def testOptionalParams_obsolete(self):
ty = self.Infer("""
class Foo(object):
def __init__(self, *types):
self.types = types
def bar(self, val):
return isinstance(val, self.types)
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
class Foo(object):
def __init__(self, *types) -> NoneType
types = ... # type: Tuple[type, ...]
def bar(self, val) -> bool
""")
def testNestedClass(self):
ty = self.Infer("""
class Foo(object):
def f(self):
class Foo(object):
pass
return Foo()
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
class Foo(object):
def f(self) -> ?
""")
def testEmptyTupleAsArg(self):
ty = self.Infer("""
def f():
return isinstance(1, ())
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
def f() -> bool
""")
def testIdentityFunction(self):
ty = self.Infer("""
def f(x):
return x
l = ["x"]
d = {}
d[l[0]] = 3
f(**d)
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import Dict, List
def f(x) -> ?
d = ... # type: Dict[str, int]
l = ... # type: List[str, ...]
""")
def testCallConstructor(self):
ty = self.Infer("""
def f(x):
return int(x, 16)
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import SupportsInt
def f(x: int or SupportsInt or str or unicode) -> int
""")
def testCallMethod(self):
ty = self.Infer("""
def f(x):
return "abc".find(x)
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
def f(x: basestring or bytearray) -> int
""")
def testImport(self):
ty = self.Infer("""
import itertools
def every(f, array):
return all(itertools.imap(f, array))
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
itertools = ... # type: module
def every(f: typing.Callable, array: typing.Iterable) -> bool
""")
def testNestedList(self):
ty = self.Infer("""
foo = [[]]
bar = []
def f():
for obj in foo[0]:
bar.append(obj)
def g():
f()
foo[0].append(42)
f()
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import List
foo = ... # type: List[list[int, ...], ...]
bar = ... # type: List[int, ...]
def f() -> NoneType
def g() -> NoneType
""")
def testTwiceNestedList(self):
ty = self.Infer("""
foo = [[[]]]
bar = []
def f():
for obj in foo[0][0]:
bar.append(obj)
def g():
f()
foo[0][0].append(42)
f()
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import List
foo = ... # type: List[List[List[int, ...], ...], ...]
bar = ... # type: List[int, ...]
def f() -> NoneType
def g() -> NoneType
""")
def testNestedListInClass(self):
ty = self.Infer("""
class Container(object):
def __init__(self):
self.foo = [[]]
self.bar = []
container = Container()
def f():
for obj in container.foo[0]:
container.bar.append(obj)
def g():
f()
container.foo[0].append(42)
f()
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import List
class Container(object):
foo = ... # type: List[List[int, ...], ...]
bar = ... # type: List[int, ...]
container = ... # type: Container
def f() -> NoneType
def g() -> NoneType
""")
def testMatchAgainstFunctionWithoutSelf(self):
with utils.Tempdir() as d:
d.create_file("bad_mod.pyi", """
class myclass:
def bad_method() -> bool
""")
ty = self.Infer("""\
import bad_mod
def f(date):
return date.bad_method()
""", deep=True, solve_unknowns=True, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
bad_mod = ... # type: module
def f(date: bad_mod.myclass) -> bool
""")
def testExternalName(self):
ty = self.Infer("""\
import collections
def bar(l):
l.append(collections.defaultdict(int, [(0, 0)]))
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
import typing
collections = ... # type: module
# TODO(kramm): The optimizer should collapse these two.
def bar(l: typing.List[collections.defaultdict] or
typing.MutableSequence[collections.defaultdict]) -> NoneType
""")
def testNameConflictWithBuiltin(self):
ty = self.Infer("""\
class LookupError(KeyError):
pass
def f(x):
pass
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
class LookupError(KeyError): ...
def f(x) -> NoneType
""")
def testMutatingTypeParameters(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import List
def f() -> List[int]
""")
ty = self.Infer("""
import foo
def f():
x = foo.f()
x.append("str")
return x
""", deep=True, pythonpath=[d.path], solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import List
foo = ... # type: module
def f() -> List[int or str]
""")
@unittest.skip("type_match.py needs support for kwonly.")
def testDuplicateKeyword(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
def f(x, *args, y) -> None
""")
self.Infer("""\
import foo
foo.f(1, y=2)
""", pythonpath=[d.path], solve_unknowns=True)
if __name__ == "__main__":
test_inference.main()
```
#### File: pytype/tests/test_super.py
```python
from pytype import utils
from pytype.tests import test_inference
class SuperTest(test_inference.InferenceTest):
"""Tests for super()."""
def testSetAttr(self):
self.assertNoErrors("""
class Foo(object):
def foo(self, name, value):
super(Foo, self).__setattr__(name, value)
""")
def testStr(self):
self.assertNoErrors("""
class Foo(object):
def foo(self, name, value):
super(Foo, self).__str__()
""")
def testGet(self):
self.assertNoErrors("""
class Foo(object):
def foo(self, name, value):
super(Foo, self).__get__(name)
""")
def testSet(self):
self.assertNoErrors("""
class Foo(object):
def foo(self, name, value):
super(Foo, self).__set__(name, value)
""")
def testInit(self):
self.assertNoErrors("""
class Foo(object):
def foo(self, name, value):
super(Foo, self).__init__()
""")
def testGetAttr(self):
self.assertNoErrors("""
class Foo(object):
def hello(self, name):
getattr(super(Foo, self), name)
""")
def testGetAttrMultipleInheritance(self):
self.assertNoErrors("""
class X(object):
pass
class Y(object):
bla = 123
class Foo(X, Y):
def hello(self):
getattr(super(Foo, self), "bla")
""")
def testGetAttrInheritance(self):
self.assertNoErrors("""
class Y(object):
bla = 123
class Foo(Y):
def hello(self):
getattr(super(Foo, self), "bla")
""")
def testIsInstance(self):
self.assertNoErrors("""
class Y(object):
pass
class Foo(Y):
def hello(self):
return isinstance(super(Foo, self), Y)
""")
def testCallSuper(self):
_, errorlog = self.InferAndCheck("""
class Y(object):
pass
class Foo(Y):
def hello(self):
return super(Foo, self)()
""")
self.assertEquals(1, len(errorlog))
self.assertErrorLogContains(errorlog, r"super.*\[not\-callable\]")
def testSuperType(self):
ty = self.Infer("""
class A(object):
pass
x = super(type, A)
""", deep=True)
self.assertTypesMatchPytd(ty, """
class A(object):
pass
x = ... # type: super
""")
def testSuperWithAmbiguousBase(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
class Grandparent(object):
def f(self) -> int
""")
ty = self.Infer("""
import foo
class Parent(foo.Grandparent):
pass
OtherParent = __any_object__
class Child(OtherParent, Parent):
def f(self):
return super(Parent, self).f()
""", pythonpath=[d.path], deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import Any
foo = ... # type: module
class Parent(foo.Grandparent): ...
OtherParent = ... # type: Any
class Child(Any, Parent): ...
""")
def testSuperWithAny(self):
self.assertNoErrors("""
super(__any_object__, __any_object__)
""")
if __name__ == "__main__":
test_inference.main()
```
#### File: pytype/tests/test_typing.py
```python
from pytype import utils
from pytype.pytd import pep484
from pytype.tests import test_inference
class TypingTest(test_inference.InferenceTest):
"""Tests for typing.py."""
_TEMPLATE = """
from __future__ import google_type_annotations
import collections
import typing
def f(s: %(annotation)s):
return s
f(%(arg)s)
"""
def _test_match(self, arg, annotation):
self.assertNoErrors(self._TEMPLATE % locals())
def _test_no_match(self, arg, annotation):
_, errors = self.InferAndCheck(self._TEMPLATE % locals())
self.assertNotEqual(0, len(errors))
def test_list_match(self):
self._test_match("[1, 2, 3]", "typing.List")
self._test_match("[1, 2, 3]", "typing.List[int]")
self._test_match("[1, 2, 3.1]", "typing.List[typing.Union[int, float]]")
self._test_no_match("[1.1, 2.1, 3.1]", "typing.List[int]")
def test_sequence_match(self):
self._test_match("[1, 2, 3]", "typing.Sequence")
self._test_match("[1, 2, 3]", "typing.Sequence[int]")
self._test_match("(1, 2, 3.1)", "typing.Sequence[typing.Union[int, float]]")
self._test_no_match("[1.1, 2.1, 3.1]", "typing.Sequence[int]")
def test_namedtuple_match(self):
self._test_match("collections.namedtuple('foo', [])()",
"typing.NamedTuple")
self._test_match("collections.namedtuple('foo', ('x', 'y'))()",
"typing.NamedTuple('foo', [('x', int), ('y', int)])")
def test_all(self):
ty = self.Infer("""
from __future__ import google_type_annotations
import typing
x = typing.__all__
""")
self.assertTypesMatchPytd(ty, """
from typing import List
typing = ... # type: module
x = ... # type: List[str]
""")
def test_cast1(self):
ty = self.Infer("""
from __future__ import google_type_annotations
import typing
def f():
return typing.cast(typing.List[int], [])
""", deep=True, solve_unknowns=True)
self.assertTypesMatchPytd(ty, """
from typing import Any
typing = ... # type: module
def f() -> Any
""")
def test_cast2(self):
self.assertNoErrors("""
from __future__ import google_type_annotations
import typing
foo = typing.cast(typing.Dict, {})
""")
def test_generator(self):
self.assertNoErrors("""\
from __future__ import google_type_annotations
from typing import Generator
def f() -> Generator[int]:
for i in range(3):
yield i
""")
def test_type(self):
ty, errors = self.InferAndCheck("""\
from __future__ import google_type_annotations
from typing import Type
class Foo:
x = 1
def f1(foo: Type[Foo]):
return foo.x
def f2(foo: Type[Foo]):
return foo.y # bad
def f3(foo: Type[Foo]):
return foo.mro()
def f4(foo: Type[Foo]):
return foo()
v1 = f1(Foo)
v2 = f2(Foo)
v3 = f3(Foo)
v4 = f4(Foo)
""")
self.assertErrorLogIs(errors, [(8, "attribute-error", r"y.*Foo")])
self.assertTypesMatchPytd(ty, """
from typing import Any, Type
class Foo:
x = ... # type: int
def f1(foo: Type[Foo]) -> int
def f2(foo: Type[Foo]) -> Any
def f3(foo: Type[Foo]) -> list
def f4(foo: Type[Foo]) -> Foo
v1 = ... # type: int
v2 = ... # type: Any
v3 = ... # type: list
v4 = ... # type: Foo
""")
def test_type_union(self):
self.assertNoErrors("""\
from __future__ import google_type_annotations
from typing import Type, Union
class Foo:
bar = ... # type: int
def f1(x: Type[Union[int, Foo]]):
x.bar
def f2(x: Union[Type[int], Type[Foo]]):
x.bar
f1(x)
def f3(x: Type[Union[int, Foo]]):
f1(x)
f2(x)
""")
def test_generate_type_alias(self):
ty = self.Infer("""
from __future__ import google_type_annotations
from typing import List
MyType = List[str]
""")
self.assertTypesMatchPytd(ty, """
from typing import List
MyType = List[str]
""")
def test_use_type_alias(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import List
MyType = List[str]
""")
self.assertNoErrors("""
from __future__ import google_type_annotations
import foo
def f(x: foo.MyType):
pass
f([""])
""", pythonpath=[d.path])
def test_callable(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Callable
def f() -> Callable
""")
self.assertNoErrors("""\
from __future__ import google_type_annotations
from typing import Callable
import foo
def f() -> Callable:
return foo.f()
def g() -> Callable:
return int
""", pythonpath=[d.path])
def test_generics(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Dict
K = TypeVar("K")
V = TypeVar("V")
class CustomDict(Dict[K, V]): ...
""")
self.assertNoErrors("""\
from __future__ import google_type_annotations
import typing
import foo
def f(x: typing.Callable[..., int]): pass
def f(x: typing.Iterator[int]): pass
def f(x: typing.Iterable[int]): pass
def f(x: typing.Container[int]): pass
def f(x: typing.Sequence[int]): pass
def f(x: typing.Tuple[int, str]): pass
def f(x: typing.MutableSequence[int]): pass
def f(x: typing.List[int]): pass
def f(x: typing.IO[str]): pass
def f(x: typing.Mapping[int, str]): pass
def f(x: typing.MutableMapping[int, str]): pass
def f(x: typing.Dict[int, str]): pass
def f(x: typing.AbstractSet[int]): pass
def f(x: typing.FrozenSet[int]): pass
def f(x: typing.MutableSet[int]): pass
def f(x: typing.Set[int]): pass
def f(x: typing.Reversible[int]): pass
def f(x: typing.SupportsAbs[int]): pass
def f(x: typing.Optional[int]): pass
def f(x: typing.Generator[int]): pass
def f(x: typing.Type[int]): pass
def f(x: typing.Pattern[str]): pass
def f(x: typing.Match[str]): pass
def f(x: foo.CustomDict[int, str]): pass
""", pythonpath=[d.path])
def test_generator_iterator_match(self):
self.assertNoErrors("""
from __future__ import google_type_annotations
from typing import Iterator
def f(x: Iterator[int]):
pass
f(x for x in [42])
""")
def testNameConflict(self):
ty = self.Infer("""
from __future__ import google_type_annotations
import typing
def f() -> typing.Any:
pass
class Any(object):
pass
def g() -> Any:
pass
""")
self.assertTypesMatchPytd(ty, """
import __future__
typing = ... # type: module
def f() -> typing.Any: ...
def g() -> Any: ...
class Any(object):
pass
""")
def testImportAll(self):
python = [
"from __future__ import google_type_annotations",
"from typing import * # pytype: disable=not-supported-yet",
] + pep484.PEP484_NAMES
ty = self.Infer("\n".join(python))
self.assertTypesMatchPytd(ty, "")
def testRecursiveTuple(self):
with utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Tuple
class Foo(Tuple[Foo]): ...
""")
self.assertNoErrors("""\
import foo
foo.Foo()
""", pythonpath=[d.path])
def testBaseClass(self):
ty = self.Infer("""\
from __future__ import google_type_annotations
from typing import Iterable
class Foo(Iterable):
pass
""")
self.assertTypesMatchPytd(ty, """
from typing import Iterable
class Foo(Iterable): ...
""")
if __name__ == "__main__":
test_inference.main()
```
|
{
"source": "JelleZijlstra/regdis",
"score": 2
}
|
#### File: regdis/regdis/dis.py
```python
import ctypes
from itertools import islice
import pprint
import re
import sre_constants
HAVE_ARG = {
sre_constants.MARK,
sre_constants.GROUPREF,
sre_constants.GROUPREF_IGNORE,
}
HAVE_LITERAL_ARG = {
sre_constants.LITERAL,
sre_constants.NOT_LITERAL,
sre_constants.LITERAL_IGNORE,
sre_constants.NOT_LITERAL_IGNORE,
}
NO_ARG = {
sre_constants.SUCCESS,
sre_constants.FAILURE,
sre_constants.ANY,
sre_constants.ANY_ALL,
}
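# Each element of a compiled pattern's code is one SRE_CODE word; on current
# CPython builds that is a 32-bit unsigned integer, which is what the two
# constants below assume. The CHARSET/BIGCHARSET bitmap sizes decoded in
# disassemble_charset() are derived from them.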
SIZEOF_SRE_CODE = 4
SRE_CODE_BITS = 8 * SIZEOF_SRE_CODE
class InvalidCodeError(Exception):
pass
def get_code(pattern, unsafe=False):
"""Returns the code for this regex pattern.
If unsafe is False, either uses the __pattern_code__ attribute or raises an error. If it is
True, falls back to using ctypes to either produce the code or die a horrible death.
"""
if isinstance(pattern, str):
pattern = re.compile(pattern)
try:
return pattern.__pattern_code__
except AttributeError:
if not unsafe:
raise NotImplementedError(
'regdis requires a Python version that exposes __pattern_code__')
int_ptr = ctypes.POINTER(ctypes.c_uint32)
offset = (
0
# _PyObject_HEAD_EXTRA, probably only in debug builds
+ 2 * ctypes.sizeof(int_ptr)
+ ctypes.sizeof(ctypes.c_long) # ob_refcnt
+ ctypes.sizeof(int_ptr) # ob_type
+ ctypes.sizeof(ctypes.c_long) # ob_size
+ ctypes.sizeof(ctypes.c_long) # groups
+ ctypes.sizeof(int_ptr) # groupindex
+ ctypes.sizeof(int_ptr) # indexgroup
+ ctypes.sizeof(int_ptr) # pattern
# actually an int but alignment
+ ctypes.sizeof(ctypes.c_long) # flags
+ ctypes.sizeof(int_ptr) # weakreflist
# same here
+ ctypes.sizeof(ctypes.c_long) # isbytes
)
pattern_address = id(pattern)
code_size = ctypes.c_long.from_address(pattern_address + offset).value
code_start = pattern_address + offset + ctypes.sizeof(ctypes.c_long)
code = []
for i in range(code_size):
address = code_start + i * ctypes.sizeof(ctypes.c_uint32)
code.append(ctypes.c_uint32.from_address(address).value)
return tuple(code)
def dis(pattern, unsafe=False):
"""Disassemble a pattern's instructions into a readable format."""
pprint.pprint(list(get_instructions(pattern, unsafe=unsafe)))
def get_instructions(pattern, unsafe=False):
"""Generator of the instructions in a pattern."""
# closely follows _validate_inner in _sre.c
code = get_code(pattern, unsafe=unsafe)
code_it = _CountingIterator(code)
return _get_instructions_inner(code_it)
def _get_instructions_inner(code_it, max_pos=None):
for codepoint in code_it:
op = sre_constants.OPCODES[codepoint]
if op in HAVE_ARG:
arg = next(code_it)
yield (op, arg)
elif op in HAVE_LITERAL_ARG:
arg = next(code_it)
yield (op, chr(arg))
elif op in NO_ARG:
yield (op, None)
elif op == sre_constants.AT:
arg = next(code_it)
yield (op, sre_constants.ATCODES[arg])
elif op in (sre_constants.IN, sre_constants.IN_IGNORE):
skip = next(code_it)
charset = _consume_and_ensure_following(
disassemble_charset, code_it, code_it.count + skip - 1, sre_constants.FAILURE)
yield (op, (skip, charset))
elif op == sre_constants.INFO:
yield (op, _disassemble_info(code_it))
elif op == sre_constants.BRANCH:
yield (op, _disassemble_branch(code_it))
elif op in (sre_constants.REPEAT_ONE, sre_constants.MIN_REPEAT_ONE):
args = {}
skip = args['skip'] = next(code_it)
inner_max_pos = code_it.count + skip - 1
args['min'] = next(code_it)
args['max'] = next(code_it)
if args['min'] > args['max'] or args['max'] > sre_constants.MAXREPEAT:
raise InvalidCodeError('Invalid min or max value')
args['inner'] = _consume_and_ensure_following(
_get_instructions_inner, code_it, inner_max_pos, sre_constants.SUCCESS)
_ensure_position(code_it, inner_max_pos)
yield (op, args)
elif op == sre_constants.REPEAT:
args = {}
skip = args['skip'] = next(code_it)
inner_max_pos = code_it.count + skip - 1
args['min'] = next(code_it)
args['max'] = next(code_it)
if args['min'] > args['max'] or args['max'] > sre_constants.MAXREPEAT:
raise InvalidCodeError('Invalid min or max value')
args['inner'] = list(_get_instructions_inner(code_it, max_pos=inner_max_pos))
_ensure_position(code_it, inner_max_pos)
next_op = sre_constants.OPCODES[next(code_it)]
if next_op not in (sre_constants.MAX_UNTIL, sre_constants.MIN_UNTIL):
raise InvalidCodeError('expected MAX_UNTIL or MIN_UNTIL to follow REPEAT')
args['next_op'] = next_op
yield (op, args)
elif op == sre_constants.GROUPREF_EXISTS:
starting_pos = code_it.count
arg = next(code_it)
skip = next(code_it)
inner_max_pos = starting_pos + skip - 2
args = {'arg': arg, 'skip': skip}
if skip >= 3 and code_it.iterable[starting_pos + skip - 2] == sre_constants.JUMP:
args['then'] = list(_get_instructions_inner(code_it, max_pos=inner_max_pos))
jump_op = sre_constants.OPCODES[next(code_it)]
if jump_op != sre_constants.JUMP:
raise InvalidCodeError('expected JUMP, got %r' % jump_op)
_ensure_position(code_it, inner_max_pos + 1)
skip = next(code_it)
inner_max_pos = code_it.count + skip - 1
args['jump_op'] = (jump_op, skip)
args['else'] = list(_get_instructions_inner(code_it, max_pos=inner_max_pos))
_ensure_position(code_it, inner_max_pos)
else:
args['then'] = list(_get_instructions_inner(code_it, max_pos=inner_max_pos))
_ensure_position(code_it, inner_max_pos)
yield (op, args)
elif op in (sre_constants.ASSERT, sre_constants.ASSERT_NOT):
skip = next(code_it)
inner_max_pos = code_it.count + skip - 1
width = next(code_it)
if width & 0x80000000:
raise InvalidCodeError('width too large')
inner = _consume_and_ensure_following(
_get_instructions_inner, code_it, inner_max_pos, sre_constants.SUCCESS)
yield (op, {'skip': skip, 'width': width, 'inner': inner})
else:
assert False, 'unhandled opcode %r' % op
if max_pos is not None and code_it.count == max_pos:
break
else:
if max_pos is not None:
raise InvalidCodeError('did not find enough codes')
def disassemble_charset(code_it, max_pos=None):
for op in code_it:
op = sre_constants.OPCODES[op]
if op == sre_constants.NEGATE:
yield (op, None)
elif op == sre_constants.LITERAL:
arg = next(code_it)
yield (op, chr(arg))
elif op in (sre_constants.RANGE, sre_constants.RANGE_IGNORE):
start = next(code_it)
stop = next(code_it)
yield (op, (chr(start), chr(stop)))
elif op == sre_constants.CHARSET:
# 256-bit bitmap
bits = list(islice(code_it, 256 // SRE_CODE_BITS))
yield (op, bits)
elif op == sre_constants.BIGCHARSET:
# nested table of bitmaps
num_blocks = next(code_it)
            contents_offset = 256 // SIZEOF_SRE_CODE  # integer division: islice() needs an int count
contents = list(islice(code_it, contents_offset))
blocks = []
for _ in range(num_blocks):
blocks.append(list(islice(code_it, 256 // SRE_CODE_BITS)))
yield (op, (num_blocks, contents, blocks))
elif op == sre_constants.CATEGORY:
arg = next(code_it)
category = sre_constants.CHCODES[arg]
yield (op, category)
else:
assert False, 'unhandled opcode %r' % op
if max_pos is not None and code_it.count == max_pos:
break
else:
if max_pos is not None:
raise InvalidCodeError('did not find enough codes')
def _disassemble_branch(code_it):
codes = []
targets = []
while True:
skip = next(code_it)
max_pos = code_it.count + skip - 1
if skip == 0:
break
inner = list(_get_instructions_inner(code_it, max_pos=code_it.count + skip - 3))
next_op = sre_constants.OPCODES[next(code_it)]
if next_op != sre_constants.JUMP:
raise InvalidCodeError('branch must be followed by JUMP (got %r)' % next_op)
end_skip = next(code_it)
inner.append((next_op, end_skip))
codes.append(inner)
targets.append(code_it.count + end_skip - 1)
_ensure_position(code_it, max_pos)
if len(set(targets)) != 1:
raise InvalidCodeError('Not all targets are the same: %s' % targets)
return codes
def _disassemble_info(code_it):
args = {}
skip = args['skip'] = next(code_it)
end_pos = code_it.count + skip - 1
flags = args['flags'] = next(code_it)
args['min'] = next(code_it)
args['max'] = next(code_it)
if (flags & ~(sre_constants.SRE_INFO_PREFIX |
sre_constants.SRE_INFO_LITERAL |
sre_constants.SRE_INFO_CHARSET)) != 0:
raise InvalidCodeError('invalid flags %r' % flags)
if ((flags & sre_constants.SRE_INFO_PREFIX) and
(flags & sre_constants.SRE_INFO_CHARSET)):
raise InvalidCodeError('PREFIX and CHARSET are mutually exclusive')
if ((flags & sre_constants.SRE_INFO_LITERAL) and
not (flags & sre_constants.SRE_INFO_PREFIX)):
raise InvalidCodeError('LITERAL implies PREFIX')
if flags & sre_constants.SRE_INFO_PREFIX:
prefix_len = next(code_it)
ignored = next(code_it)
chars = ''.join(map(chr, islice(code_it, prefix_len)))
overlap_table = tuple(islice(code_it, prefix_len))
args['prefix'] = {
'prefix_len': prefix_len,
'ignored': ignored,
'chars': chars,
'overlap_table': overlap_table,
}
if flags & sre_constants.SRE_INFO_CHARSET:
args['charset'] = _consume_and_ensure_following(
disassemble_charset, code_it, end_pos, sre_constants.FAILURE)
if code_it.count != end_pos:
raise InvalidCodeError('incorrect skip in INFO')
return args
def _ensure_position(code_it, pos):
if code_it.count != pos:
raise InvalidCodeError('incorrect skip (%s vs. %s)' % (code_it.count, pos))
def _consume_and_ensure_following(fn, code_it, max_pos, next_code):
inner = list(fn(code_it, max_pos=max_pos - 1))
next_op = sre_constants.OPCODES[next(code_it)]
if next_op != next_code:
raise InvalidCodeError('Expected %s, got %s' % (next_code, next_op))
inner.append((next_op, None))
return inner
class _CountingIterator(object):
"""Iterator wrapper that keeps track of how many items have been consumed."""
def __init__(self, iterable):
self.iterable = iterable
self.iterator = iter(iterable)
self.count = 0
def __iter__(self):
return self
def __next__(self):
value = next(self.iterator)
self.count += 1
return value
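# Usage sketch (illustrative, under the assumptions noted below):
#
#     >>> list(get_instructions("ab+"))      # doctest: +SKIP
#     [(INFO, {...}), (LITERAL, 'a'),
#      (REPEAT_ONE, {'skip': ..., 'min': 1, 'max': ..., 'inner': [...]}),
#      (SUCCESS, None)]
#
# This requires a CPython build that exposes __pattern_code__ on compiled
# patterns; otherwise pass unsafe=True to fall back to the ctypes-based
# extraction in get_code(), which makes strong assumptions about the
# SRE_Pattern struct layout.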
```
|
{
"source": "JelleZijlstra/sqltree",
"score": 3
}
|
#### File: sqltree/sqltree/fixit.py
```python
import libcst as cst
from fixit import CstLintRule, InvalidTestCase as Invalid, ValidTestCase as Valid
from libcst.helpers import get_full_name_for_node
from sqltree.parser import ParseError
from .formatter import format
class SqlFormatRule(CstLintRule):
"""
Uses sqltree to format SQL queries.
"""
MESSAGE = "Reformat SQL"
VALID = [
Valid(
'''
sql = """
SELECT *
FROM x
"""
'''
),
Valid(
'''
def f():
sql = """
SELECT *
FROM x
"""
def g():
x
def weirdly_indented():
if x:
sql = """
SELECT y
FROM z
"""
'''
),
]
INVALID = [
Invalid(
"sql = 'select * from x'",
line=1,
column=7,
expected_replacement='''
sql = """
SELECT *
FROM x
"""''',
)
]
current_indent: int = 0
default_indent: int = 0
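    # current_indent tracks how deeply the visited assignment is nested so the
    # reformatted SQL emitted in visit_Assign() can be indented to match;
    # default_indent holds the module's default block indent, used for
    # IndentedBlocks that carry no explicit indent of their own.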
def visit_Module(self, node: cst.Module) -> None:
self.default_indent = len(node.default_indent.replace("\t", " " * 4))
def visit_IndentedBlock(self, node: cst.IndentedBlock) -> None:
self.current_indent += self._indent_of(node)
def leave_IndentedBlock(self, node: cst.IndentedBlock) -> None:
self.current_indent -= self._indent_of(node)
def _indent_of(self, node: cst.IndentedBlock) -> int:
if node.indent is not None:
return len(node.indent.replace("\t", " " * 4))
else:
return self.default_indent
def visit_Call(self, node: cst.Call) -> None:
# TODO format specific calls
pass
def visit_Assign(self, node: cst.Assign) -> None:
full_name = get_full_name_for_node(node.targets[0].target)
if full_name == "sql" and isinstance(node.value, cst.SimpleString):
query = node.value.evaluated_value
try:
formatted = format(query, indent=self.current_indent + 4)
except ParseError as e:
self.report(node, message=str(e))
else:
# TODO escaping, preserve prefix
replacement = f'"""{formatted}"""'
if replacement != node.value.value:
new_str = node.value.with_changes(value=replacement)
self.report(node.value, replacement=new_str)
```
#### File: sqltree/sqltree/formatter.py
```python
import argparse
from contextlib import contextmanager
from dataclasses import dataclass, field
from typing import Generator, Iterator, List, Optional, Sequence, Tuple, Type
from . import parser as p
from .dialect import DEFAULT_DIALECT, Dialect
from .sqltree import sqltree
from .tokenizer import Token
from .visitor import Transformer, Visitor
DEFAULT_LINE_LENGTH = 88 # like black
INDENT_SIZE = 4
class LineTooLong(Exception):
"""Raised internally when a line is about to get too long."""
State = Tuple[int, int, int]
@dataclass
class Formatter(Visitor[None]):
dialect: Dialect
line_length: int = DEFAULT_LINE_LENGTH
indent: int = 0
lines: List[List[str]] = field(default_factory=list)
should_skip_comments: bool = False
current_line_length: int = 0
can_split: bool = False
line_has_content: bool = False
node_stack: List[p.Node] = field(default_factory=list)
def format(self, tree: p.Node) -> str:
self.visit(tree)
sql = "".join(piece for line in self.lines for piece in line)
if self.indent > 0:
return f"\n{sql}\n{' ' * (self.indent - INDENT_SIZE)}"
else:
return sql + "\n"
@contextmanager
def add_indent(self) -> Iterator[None]:
self.indent += INDENT_SIZE
try:
yield
finally:
self.indent -= INDENT_SIZE
@contextmanager
def override_can_split(self) -> Iterator[State]:
previous_val = self.can_split
self.can_split = True
try:
yield self.get_state()
finally:
self.can_split = previous_val
def get_state(self) -> State:
num_pieces = len(self.lines[-1]) if self.lines else 0
return (len(self.lines), num_pieces, self.current_line_length)
def restore_state(self, state: State) -> None:
num_lines, num_pieces, current_line_length = state
del self.lines[num_lines:]
if num_lines > 0:
del self.lines[-1][num_pieces:]
self.current_line_length = current_line_length
assert self.get_state() == state
def write(self, text: str) -> None:
if not self.lines:
self.start_new_line()
self.lines[-1].append(text)
self.line_has_content = True
self.current_line_length += len(text)
if self.can_split and self.current_line_length > self.line_length:
raise LineTooLong
def add_space(self) -> None:
if self.lines and self.lines[-1] and not self.lines[-1][-1].endswith(" "):
self.write(" ")
def start_new_line(self) -> None:
if self.lines and not self.line_has_content:
return
if self.lines and any(not text.isspace() for text in self.lines[-1]):
self.lines[-1].append("\n")
self.current_line_length = self.indent
self.line_has_content = False
line = []
self.lines.append(line)
if self.indent:
line.append(" " * self.indent)
def force_indentation(self) -> None:
if self.line_has_content:
self.start_new_line()
else:
needed = self.indent - self.current_line_length
self.lines[-1].append(" " * needed)
def clear_trailing_space(self) -> None:
if self.lines[-1] and self.lines[-1][-1].endswith(" "):
self.lines[-1][-1] = self.lines[-1][-1][:-1]
self.current_line_length -= 1
def add_comments(self, comments: Sequence[Token]) -> None:
if comments:
self.add_space()
for comment in comments:
self.write(comment.text.rstrip("\n"))
self.start_new_line()
def add_comments_from_leaf(self, node: p.Leaf) -> None:
if not self.should_skip_comments:
self.add_comments(node.token.comments)
def visit_trailing_comma(self, node: Optional[p.Punctuation]) -> None:
if node is not None:
self.visit(node)
self.add_space()
@contextmanager
def skip_comments(self) -> Generator[None, None, None]:
old_value = self.should_skip_comments
try:
self.should_skip_comments = True
yield
finally:
self.should_skip_comments = old_value
def visit(self, node: p.Node) -> None:
self.node_stack.append(node)
try:
if isinstance(node, p.Statement):
for comment in node.leading_comments:
self.write(comment.text.rstrip("\n"))
self.start_new_line()
super().visit(node)
if isinstance(node, p.Leaf):
self.add_comments_from_leaf(node)
finally:
self.node_stack.pop()
def maybe_visit(
self,
node: Optional[p.Node],
*,
else_write: Optional[str] = None,
add_space: bool = False,
) -> None:
if node is None:
if else_write is not None:
self.write(else_write)
if add_space:
self.add_space()
return None
self.visit(node)
if add_space:
self.add_space()
def parent_isinstance(self, node_cls: Type[p.Node]) -> bool:
if len(self.node_stack) < 2:
return False
return isinstance(self.node_stack[-2], node_cls)
def visit_KeywordSequence(self, node: p.KeywordSequence) -> None:
# Move all the comments to the end
with self.skip_comments():
for i, kw in enumerate(node.keywords):
if i > 0:
self.add_space()
self.visit(kw)
for kw in node.keywords:
self.add_comments(kw.token.comments)
def visit_FromClause(self, node: p.FromClause) -> None:
if not self.parent_isinstance(p.Delete):
self.start_new_line()
if node.kw is None:
self.write("FROM")
else:
self.visit(node.kw)
self.write_comma_list(node.table)
def visit_WhereClause(self, node: p.WhereClause) -> None:
self.start_new_line()
self.visit(node.kw)
self.add_space()
self.visit(node.conditions)
def visit_HavingClause(self, node: p.HavingClause) -> None:
self.start_new_line()
self.visit(node.kw)
self.add_space()
self.visit(node.conditions)
def write_comma_list(
self, nodes: Sequence[p.WithTrailingComma[p.Node]], with_space: bool = True
) -> None:
with self.override_can_split() as state:
try:
if with_space and nodes:
self.add_space()
for node in nodes:
self.visit(node)
except LineTooLong:
pass
else:
return
# Split any enclosing list first
if self.can_split:
raise LineTooLong
self.restore_state(state)
with self.add_indent():
for node in nodes:
self.start_new_line()
self.visit(node)
self.clear_trailing_space()
self.start_new_line()
def visit_GroupByClause(self, node: p.GroupByClause) -> None:
self.start_new_line()
self.visit(node.kwseq)
self.write_comma_list(node.expr)
def visit_OrderByClause(self, node: p.OrderByClause) -> None:
self.start_new_line()
self.visit(node.kwseq)
self.write_comma_list(node.expr)
def visit_SetClause(self, node: p.SetClause) -> None:
self.start_new_line()
self.visit(node.kw)
self.write_comma_list(node.assignments)
def visit_IntoClause(self, node: p.IntoClause) -> None:
if node.kw is not None:
self.visit(node.kw)
else:
self.write("INTO")
self.add_space()
self.visit(node.table)
self.maybe_visit(node.col_names)
def visit_ColNameList(self, node: p.ColNameList) -> None:
self.visit(node.open_paren)
self.write_comma_list(node.col_names, with_space=False)
self.visit(node.close_paren)
def visit_Subselect(self, node: p.Subselect) -> None:
if node.left_paren is None:
self.visit(node.select)
else:
self.visit(node.left_paren)
with self.add_indent():
self.visit(node.select)
assert node.right_paren is not None, "both parens must be set"
self.visit(node.right_paren)
def visit_ValuesClause(self, node: p.ValuesClause) -> None:
self.start_new_line()
if node.kw.text == "VALUES":
self.visit(node.kw)
else:
self.write("VALUES")
self.add_comments_from_leaf(node.kw)
self.write_comma_list(node.value_lists)
def visit_DefaultValues(self, node: p.DefaultValues) -> None:
self.start_new_line()
self.visit(node.kwseq)
def visit_ValueList(self, node: p.ValueList) -> None:
self.visit(node.open_paren)
self.write_comma_list(node.values, with_space=False)
self.visit(node.close_paren)
def visit_OdkuClause(self, node: p.OdkuClause) -> None:
self.start_new_line()
self.visit(node.kwseq)
self.write_comma_list(node.assignments)
def visit_Assignment(self, node: p.Assignment) -> None:
self.visit(node.col_name)
self.add_space()
self.visit(node.eq_punc)
self.add_space()
self.visit(node.value)
def visit_Default(self, node: p.Default) -> None:
self.visit(node.kw)
def visit_All(self, node: p.All) -> None:
self.visit(node.kw)
def visit_LimitClause(self, node: p.LimitClause) -> None:
self.start_new_line()
self.visit(node.kw)
self.add_space()
self.visit(node.row_count)
def visit_SelectLimitClause(self, node: p.SelectLimitClause) -> None:
self.start_new_line()
self.visit(node.kw)
self.add_space()
self.visit(node.row_count)
if node.offset is not None:
self.add_space()
self.write("OFFSET")
if node.offset_leaf is not None:
self.add_comments_from_leaf(node.offset_leaf)
self.add_space()
self.visit(node.offset)
def visit_CommonTableExpression(self, node: p.CommonTableExpression) -> None:
self.visit(node.table_name)
self.add_space()
if node.col_names is not None:
self.visit(node.col_names)
self.add_space()
self.visit(node.as_kw)
self.add_space()
self.visit(node.subquery)
def visit_WithClause(self, node: p.WithClause) -> None:
self.start_new_line()
self.visit(node.kw)
if node.recursive_kw is not None:
self.add_space()
self.visit(node.recursive_kw)
self.write_comma_list(node.ctes)
def visit_UsingClause(self, node: p.UsingClause) -> None:
self.start_new_line()
self.visit(node.kw)
self.write_comma_list(node.tables)
def visit_Select(self, node: p.Select) -> None:
self.maybe_visit(node.with_clause)
self.start_new_line()
self.visit(node.select_kw)
for kw in node.modifiers:
self.add_space()
self.visit(kw)
self.write_comma_list(node.select_exprs)
self.maybe_visit(node.from_clause)
self.maybe_visit(node.where)
self.maybe_visit(node.group_by)
self.maybe_visit(node.having)
self.maybe_visit(node.order_by)
self.maybe_visit(node.limit)
def visit_Delete(self, node: p.Delete) -> None:
self.maybe_visit(node.with_clause)
self.start_new_line()
self.visit(node.delete_kw)
self.add_space()
self.visit(node.from_clause)
self.maybe_visit(node.using_clause)
self.maybe_visit(node.where)
self.maybe_visit(node.order_by)
self.maybe_visit(node.limit)
def visit_Update(self, node: p.Update) -> None:
self.maybe_visit(node.with_clause)
self.start_new_line()
self.visit(node.update_kw)
self.add_space()
self.visit(node.table)
self.visit(node.set_clause)
self.maybe_visit(node.where)
self.maybe_visit(node.order_by)
self.maybe_visit(node.limit)
def _visit_insert_values(self, node: p.InsertValues) -> None:
if isinstance(node, p.Subselect) and node.left_paren is not None:
self.add_space()
self.visit(node)
def visit_Insert(self, node: p.Insert) -> None:
self.start_new_line()
self.visit(node.insert_kw)
self.add_space()
if node.ignore_kw is not None:
self.visit(node.ignore_kw)
self.add_space()
self.visit(node.into)
self._visit_insert_values(node.values)
self.maybe_visit(node.odku)
def visit_Replace(self, node: p.Replace) -> None:
self.start_new_line()
self.visit(node.replace_kw)
self.add_space()
self.visit(node.into)
self._visit_insert_values(node.values)
def visit_Keyword(self, node: p.Keyword) -> None:
self.write(node.text.upper())
def visit_Punctuation(self, node: p.Punctuation) -> None:
self.write(node.text)
def visit_KeywordIdentifier(self, node: p.KeywordIdentifier) -> None:
self.visit(node.keyword)
def visit_Identifier(self, node: p.Identifier) -> None:
if node.text.upper() in self.dialect.get_keywords():
delimiter = self.dialect.get_identifier_delimiter()
self.write(f"{delimiter}{node.text}{delimiter}")
else:
self.write(node.text)
def visit_Dotted(self, node: p.Dotted) -> None:
self.visit(node.left)
self.visit(node.dot)
self.visit(node.right)
def visit_Placeholder(self, node: p.Placeholder) -> None:
self.write(node.text)
def visit_PlaceholderClause(self, node: p.PlaceholderClause) -> None:
self.start_new_line()
self.visit(node.placeholder)
def visit_IntegerLiteral(self, node: p.IntegerLiteral) -> None:
self.write(str(node.value))
def visit_StringLiteral(self, node: p.StringLiteral) -> None:
# ' is more portable
self.write(f"'{node.value}'")
def visit_Star(self, node: p.Star) -> None:
self.write("*")
def visit_WithTrailingComma(self, node: p.WithTrailingComma) -> None:
self.visit(node.node)
self.visit_trailing_comma(node.trailing_comma)
def visit_FunctionCall(self, node: p.FunctionCall) -> None:
self.visit(node.callee)
self.visit(node.left_paren)
self.write_comma_list(node.args, with_space=False)
self.visit(node.right_paren)
def visit_Parenthesized(self, node: p.Parenthesized) -> None:
self.visit(node.left_punc)
self.visit(node.inner)
self.visit(node.right_punc)
def visit_ExprList(self, node: p.ExprList) -> None:
self.visit(node.left_paren)
self.write_comma_list(node.exprs, with_space=False)
self.visit(node.right_paren)
def visit_WhenThen(self, node: p.WhenThen) -> None:
self.visit(node.when_kw)
self.add_space()
self.visit(node.condition)
self.add_space()
self.visit(node.then_kw)
self.add_space()
self.visit(node.result)
def visit_ElseClause(self, node: p.ElseClause) -> None:
self.visit(node.else_kw)
self.add_space()
self.visit(node.expr)
def visit_CaseExpression(self, node: p.CaseExpression) -> None:
self.visit(node.case_kw)
self.add_space()
self.maybe_visit(node.value, add_space=True)
for when_then in node.when_thens:
self.add_space()
self.visit(when_then)
self.add_space()
self.maybe_visit(node.else_clause, add_space=True)
self.visit(node.end_kw)
def visit_BinOp(self, node: p.BinOp) -> None:
precedence = node.get_precedence()
if precedence >= p.MIN_BOOLEAN_PRECEDENCE:
self.clear_trailing_space()
with self.add_indent():
self.visit_BinOp_multiline(node)
if self.parent_isinstance(p.Parenthesized):
self.start_new_line()
else:
self.visit(node.left)
self.add_space()
self.visit(node.op)
self.add_space()
self.visit(node.right)
def visit_BinOp_multiline(self, node: p.BinOp) -> None:
precedence = node.get_precedence()
self.force_indentation()
self._maybe_multiline(node.left, precedence)
self.start_new_line()
self.visit(node.op)
self.add_space()
self._maybe_multiline(node.right, precedence)
def _maybe_multiline(self, node: p.Node, precedence: int) -> None:
if isinstance(node, p.BinOp) and node.get_precedence() == precedence:
self.visit_BinOp_multiline(node)
else:
self.visit(node)
def visit_SelectExpr(self, node: p.SelectExpr) -> None:
self.visit(node.expr)
if node.as_kw is not None and node.alias is not None:
self.add_space()
self.visit(node.as_kw)
self.add_space()
self.visit(node.alias)
def visit_OrderByExpr(self, node: p.OrderByExpr) -> None:
self.visit(node.expr)
if node.direction_kw is not None:
self.add_space()
self.visit(node.direction_kw)
def visit_IndexHint(self, node: p.IndexHint) -> None:
self.start_new_line()
self.visit(node.intro_kw)
self.add_space()
self.visit(node.kind_kw)
if node.for_what is not None:
self.add_space()
if node.for_kw is not None:
self.visit(node.for_kw)
else:
self.write("FOR")
self.add_space()
self.visit(node.for_what)
self.visit(node.left_paren)
self.write_comma_list(node.index_list, with_space=False)
self.visit(node.right_paren)
def visit_JoinOn(self, node: p.JoinOn) -> None:
self.start_new_line()
self.visit(node.kw)
self.add_space()
self.visit(node.search_condition)
def visit_JoinUsing(self, node: p.JoinUsing) -> None:
self.start_new_line()
self.visit(node.kw)
self.add_space()
self.visit(node.left_paren)
self.write_comma_list(node.join_column_list)
self.visit(node.right_paren)
def visit_SimpleJoinedTable(self, node: p.SimpleJoinedTable):
self.visit_join(node)
def visit_LeftRightJoinedTable(self, node: p.LeftRightJoinedTable):
self.visit_join(node)
def visit_NaturalJoinedTable(self, node: p.NaturalJoinedTable):
self.visit_join(node)
def visit_join(self, node: p.JoinedTable, *, skip_indent: bool = False) -> None:
if isinstance(node, (p.SimpleJoinedTable, p.LeftRightJoinedTable)):
join_spec = node.join_specification
else:
join_spec = None
if isinstance(node, p.SimpleJoinedTable):
kws = [node.inner_cross, node.join_kw]
elif isinstance(node, p.LeftRightJoinedTable):
kws = [node.left_right, node.outer_kw, node.join_kw]
else:
kws = [node.natural_kw, node.left_right, node.inner_outer, node.join_kw]
if isinstance(
node.left,
(p.SimpleJoinedTable, p.LeftRightJoinedTable, p.NaturalJoinedTable),
):
self.visit(node.left)
else:
self.clear_trailing_space()
with self.add_indent():
self.start_new_line()
self.visit(node.left)
self.start_new_line()
for kw in kws:
if kw is not None:
self.visit(kw)
with self.add_indent():
self.start_new_line()
self.visit(node.right)
self.maybe_visit(join_spec)
def visit_SimpleTableFactor(self, node: p.SimpleTableFactor) -> None:
self.visit(node.table_name)
if node.alias is not None:
self.add_space()
self.maybe_visit(node.as_kw, else_write="AS", add_space=True)
self.visit(node.alias)
for index_hint in node.index_hint_list:
self.start_new_line()
self.visit(index_hint.node)
if index_hint.trailing_comma is not None:
self.visit(index_hint.trailing_comma)
def visit_SubqueryFactor(self, node: p.SubqueryFactor) -> None:
self.maybe_visit(node.lateral_kw, add_space=True)
self.visit(node.table_subquery)
self.maybe_visit(node.as_kw, else_write="AS", add_space=True)
self.visit(node.alias)
if node.col_list:
self.maybe_visit(node.left_paren, else_write="(")
self.write_comma_list(node.col_list)
self.maybe_visit(node.right_paren, else_write=")")
def visit_TableReferenceList(self, node: p.TableReferenceList) -> None:
self.visit(node.left_paren)
self.write_comma_list(node.references, with_space=False)
self.visit(node.right_paren)
def format_tree(
tree: p.Node,
*,
dialect: Dialect = DEFAULT_DIALECT,
line_length: int = DEFAULT_LINE_LENGTH,
indent: int = 0,
) -> str:
return Formatter(dialect, line_length=line_length, indent=indent).format(tree)
def format(
sql: str,
dialect: Dialect = DEFAULT_DIALECT,
*,
line_length: int = DEFAULT_LINE_LENGTH,
indent: int = 0,
) -> str:
return format_tree(
sqltree(sql, dialect), dialect=dialect, line_length=line_length, indent=indent
)
def transform_and_format(sql: str, transformer: Transformer) -> str:
tree = sqltree(sql)
new_tree = transformer.visit(tree)
return format_tree(new_tree)
if __name__ == "__main__":
parser = argparse.ArgumentParser("sqltree")
parser.add_argument("sql", help="SQL string to format")
args = parser.parse_args()
print(format(args.sql), end="")
```
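A brief usage sketch for the formatter above; `format()` and its keyword arguments are taken from the module as written, and the query and output shown are only illustrative:

```python
# Hypothetical usage of sqltree.formatter.format (query text is made up).
from sqltree.formatter import format

sql = "select a,b from my_table where a = 3"
print(format(sql), end="")
# Roughly expected output:
#   SELECT a, b
#   FROM my_table
#   WHERE a = 3
```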
#### File: sqltree/sqltree/location.py
```python
from dataclasses import dataclass, field
@dataclass
class Location:
sql: str = field(repr=False)
start_index: int
end_index: int # index of the last character included in the token
def display(self) -> str:
is_past_end = self.start_index >= len(self.sql)
starting_lineno = self.sql.count("\n", 0, self.start_index)
try:
previous_newline = self.sql.rindex("\n", 0, self.start_index)
except ValueError:
previous_newline = -1
try:
following_newline = self.sql.index("\n", self.end_index)
except ValueError:
following_newline = len(self.sql)
ending_lineno = self.sql.count("\n", 0, following_newline)
lineno_length = len(str(ending_lineno))
pieces = []
pieces.append(f"{starting_lineno:{lineno_length}}: ")
pieces.append(self.sql[previous_newline + 1 : self.start_index])
if is_past_end:
matching_pieces = [" "]
else:
matching_pieces = self.sql[self.start_index : self.end_index + 1].split(
"\n"
)
leading_length = lineno_length + 2 + (self.start_index - previous_newline - 1)
remaining_carets = None
for i, piece in enumerate(matching_pieces):
if remaining_carets is not None:
pieces.append(remaining_carets)
if i > 0:
pieces.append("\n")
pieces.append(piece)
remaining_carets = "\n" + " " * leading_length + "^" * len(piece)
leading_length = lineno_length + 2
rest = self.sql[self.end_index + 1 : following_newline]
pieces.append(rest)
if remaining_carets:
pieces.append(remaining_carets)
pieces.append("\n")
return "".join(pieces)
```
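A small sketch of how `Location.display()` points at a span inside a query (values chosen for illustration):

```python
# Hypothetical usage of Location.display(): underline the word "FROM".
sql = "SELECT a\nFROM t"
start = sql.index("FROM")
loc = Location(sql, start, start + 3)
print(loc.display(), end="")
# Prints the offending line with carets under the span, roughly:
#   1: FROM t
#      ^^^^
```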
#### File: sqltree/sqltree/tokenizer.py
```python
import enum
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, Iterable, Sequence, Set
from .dialect import Dialect
from .location import Location
from .peeking_iterator import PeekingIterator
class TokenizeError(Exception):
pass
class TokenType(enum.Enum):
keyword = 1
punctuation = 2
string = 3
number = 4
identifier = 5
placeholder = 6
comment = 7
eof = 8
def make(self, text: str, loc: Location) -> "Token":
return Token(self, text, loc)
@dataclass
class Token:
typ: TokenType
text: str
loc: Location
comments: Sequence["Token"] = ()
PUNCTUATION = {
".",
"(",
")",
",",
"+",
"*",
">=",
"<=",
"=",
"<>",
"!=",
"/",
"%",
"-",
"~",
"&",
"^",
"|",
">>",
"<<",
"&&",
"||",
"--", # Not a punctuation but a comment
"/*", # Also a comment
}
QUOTATIONS = {"`", "'", '"'}
def tokenize(sql: str, dialect: Dialect) -> Iterable[Token]:
# Prepare punctuation
starting_char_to_continuations: Dict[str, Set[str]] = defaultdict(set)
for punc in PUNCTUATION:
if len(punc) == 1:
starting_char_to_continuations[punc].add("")
elif len(punc) == 2:
starting_char_to_continuations[punc[0]].add(punc[1])
else:
raise TokenizeError(f"don't know how to handle {punc}")
pi = PeekingIterator(sql)
while pi.has_next():
start_index = pi.next_pos
char = pi.next()
if char.isalpha():
pi.wind_back()
text = _consume_identifier(pi)
token_type = TokenType.identifier
elif char.isspace():
continue # Skip over whitespace
elif char == "%":
next_char = pi.peek()
if next_char is not None and next_char.isalpha():
token_type = TokenType.placeholder
text = "%" + _consume_identifier(pi)
else:
token_type = TokenType.punctuation
text = "%"
elif char in starting_char_to_continuations:
token_type = TokenType.punctuation
continuations = starting_char_to_continuations[char]
if not pi.has_next():
if "" in continuations:
text = char
else:
raise TokenizeError(
f"unexpected EOF following {char} (expected one of"
f" {continuations})"
)
else:
c = pi.next()
if c in continuations:
text = char + c
if text == "--":
token_type = TokenType.comment
text += _consume_until(pi, "\n", eof_okay=True)
elif text == "/*":
token_type = TokenType.comment
chars = []
seen_star = False
for c in pi:
chars.append(c)
if seen_star and c == "/":
text += "".join(chars)
break
if c == "*":
seen_star = True
else:
seen_star = False
else:
raise TokenizeError("unexpected EOF (expected '*/')")
elif "" in c:
pi.wind_back()
text = char
else:
raise TokenizeError(
f"unexpected {c} following {char} (expected one of"
f" {continuations})"
)
elif char.isnumeric():
# TODO floats, hex, other kinds of numbers?
pi.wind_back()
token_type = TokenType.number
text = _consume_integer(pi)
elif char in QUOTATIONS:
token_type = TokenType.string
text = char + _consume_until(pi, char)
elif char == "{":
token_type = TokenType.placeholder
text = "{" + _consume_until(pi, "}")
elif char == "#":
token_type = TokenType.comment
text = "#" + _consume_until(pi, "\n", eof_okay=True)
else:
# TODO comments. Maybe we leave those out of the AST in the parser, then reattach
# them later at the nearest node we find. Or we just attach them to the token and make
# sure each token object stays in the AST.
raise TokenizeError(f"unexpected character {char}")
yield Token(token_type, text, Location(sql, start_index, pi.next_pos - 1))
def _consume_until(pi: PeekingIterator[str], end: str, eof_okay: bool = False) -> str:
chars = []
for c in pi:
chars.append(c)
# TODO backslash escapes?
if c == end:
return "".join(chars)
if eof_okay:
return "".join(chars)
raise TokenizeError(f"unexpected EOF (expected {end!r})")
def _consume_identifier(pi: PeekingIterator[str]) -> str:
chars = []
for c in pi:
if c.isalnum() or c == "_":
chars.append(c)
else:
pi.wind_back()
return "".join(chars)
return "".join(chars)
def _consume_integer(pi: PeekingIterator[str]) -> str:
chars = []
for c in pi:
if c.isnumeric():
chars.append(c)
else:
pi.wind_back()
return "".join(chars)
return "".join(chars)
```
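A short sketch of driving the tokenizer directly, mirroring what the tests below do; `DEFAULT_DIALECT` is assumed to come from `sqltree.dialect` as in the test file:

```python
# Hypothetical tokenizer usage: print each token's type and text.
from sqltree.dialect import DEFAULT_DIALECT
from sqltree.tokenizer import tokenize

for tok in tokenize("SELECT a FROM t WHERE x = 3 -- note", DEFAULT_DIALECT):
    print(tok.typ.name, repr(tok.text))
# Keywords come out as plain identifier tokens; the trailing "-- note"
# is emitted as a single comment token.
```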
#### File: sqltree/sqltree/visitor.py
```python
import collections.abc
from dataclasses import fields
from typing import Generic, Optional, TypeVar
from .parser import Node
T = TypeVar("T")
class Visitor(Generic[T]):
def visit(self, node: Node) -> T:
method_name = f"visit_{type(node).__name__}"
method = getattr(self, method_name, self.generic_visit)
return method(node) # type: ignore
def maybe_visit(self, node: Optional[Node]) -> Optional[T]:
if node is None:
return None
return self.visit(node)
def generic_visit(self, node: Node) -> T:
raise NotImplementedError(node)
class Transformer(Visitor[Node]):
def generic_visit(self, node: Node) -> Node:
cls = type(node)
kwargs = {}
for field in fields(node):
key = field.name
value = getattr(node, key)
if isinstance(value, Node):
kwargs[key] = self.visit(value)
elif isinstance(value, collections.abc.Sequence) and not isinstance(
value, str
):
kwargs[key] = [self.visit(member) for member in value]
else:
kwargs[key] = value
return cls(**kwargs)
```
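A rough sketch of a `Transformer` subclass; it assumes the parser's `Identifier` node is a dataclass with a `text` field (the use of `dataclasses.fields` in `generic_visit` suggests the nodes are dataclasses), and the table names are made up:

```python
# Hypothetical Transformer subclass: rename one table identifier, then re-format.
import dataclasses

from sqltree import parser as p
from sqltree.formatter import transform_and_format
from sqltree.visitor import Transformer


class RenameTable(Transformer):
    def visit_Identifier(self, node: p.Identifier) -> p.Node:
        # Assumes Identifier is a dataclass exposing the raw text.
        if node.text == "old_table":
            return dataclasses.replace(node, text="new_table")
        return node


print(transform_and_format("SELECT a FROM old_table", RenameTable()), end="")
```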
#### File: sqltree/tests/test_tokenizer.py
```python
from functools import partial
from typing import Sequence
from sqltree.dialect import DEFAULT_DIALECT
from sqltree.location import Location
from sqltree.tokenizer import Token, TokenType, tokenize
def check(sql: str, tokens: Sequence[Token]) -> None:
actual = list(tokenize(sql, DEFAULT_DIALECT))
assert actual == tokens
def test_tokenize() -> None:
sql = "SELECT * FROM table WHERE x = 3 AND y = 'x' AND z = {x} AND alpha = %s -- x"
L = partial(Location, sql)
check(
sql,
[
TokenType.identifier.make("SELECT", L(0, 5)),
TokenType.punctuation.make("*", L(7, 7)),
TokenType.identifier.make("FROM", L(9, 12)),
TokenType.identifier.make("table", L(14, 18)),
TokenType.identifier.make("WHERE", L(20, 24)),
TokenType.identifier.make("x", L(26, 26)),
TokenType.punctuation.make("=", L(28, 28)),
TokenType.number.make("3", L(30, 30)),
TokenType.identifier.make("AND", L(32, 34)),
TokenType.identifier.make("y", L(36, 36)),
TokenType.punctuation.make("=", L(38, 38)),
TokenType.string.make("'x'", L(40, 42)),
TokenType.identifier.make("AND", L(44, 46)),
TokenType.identifier.make("z", L(48, 48)),
TokenType.punctuation.make("=", L(50, 50)),
TokenType.placeholder.make("{x}", L(52, 54)),
TokenType.identifier.make("AND", L(56, 58)),
TokenType.identifier.make("alpha", L(60, 64)),
TokenType.punctuation.make("=", L(66, 66)),
TokenType.placeholder.make("%s", L(68, 69)),
TokenType.comment.make("-- x", L(71, 74)),
],
)
def test_comment() -> None:
sql = "SELECT /* c */ * FROM table # x"
L = partial(Location, sql)
check(
sql,
[
TokenType.identifier.make("SELECT", L(0, 5)),
TokenType.comment.make("/* c */", L(7, 13)),
TokenType.punctuation.make("*", L(15, 15)),
TokenType.identifier.make("FROM", L(17, 20)),
TokenType.identifier.make("table", L(22, 26)),
TokenType.comment.make("# x", L(28, 30)),
],
)
```
|
{
"source": "JelleZijlstra/typeshed_client",
"score": 2
}
|
#### File: JelleZijlstra/typeshed_client/update_bundled.py
```python
from pathlib import Path
import shutil
import subprocess
import tempfile
def update_bundled() -> None:
ts_client = Path("typeshed_client")
assert (
ts_client.is_dir()
), "this script must be run at the root of the typeshed_client repository"
bundled_ts_dir = ts_client / "typeshed"
if bundled_ts_dir.exists():
shutil.rmtree(bundled_ts_dir)
with tempfile.TemporaryDirectory() as temp_dir_str:
temp_dir = Path(temp_dir_str)
subprocess.check_call(
["git", "clone", "https://github.com/python/typeshed.git", "--depth", "1"],
cwd=temp_dir,
)
shutil.copytree(temp_dir / "typeshed" / "stdlib", bundled_ts_dir)
subprocess.check_call(["git", "add", str(bundled_ts_dir)])
if __name__ == "__main__":
update_bundled()
```
|
{
"source": "jelliotthiggins/pyslim",
"score": 2
}
|
#### File: pyslim/tests/test_tree_sequence.py
```python
from __future__ import print_function
from __future__ import division
import tests
import unittest
import random
import os
import numpy as np
import pytest
import tskit
import msprime
import pyslim
class TestSlimTreeSequence(tests.PyslimTestCase):
def clean_example(self):
tables = tskit.TableCollection(sequence_length=100)
tables.populations.add_row()
tables.populations.add_row()
tables.individuals.add_row()
tables.nodes.add_row(time=0, flags=tskit.NODE_IS_SAMPLE, population=1, individual=0)
tables.nodes.add_row(time=0, flags=tskit.NODE_IS_SAMPLE, population=1, individual=0)
pyslim.annotate_defaults_tables(tables, model_type='nonWF', slim_generation=1)
return tables
def test_inconsistent_nodes(self):
clean_tables = self.clean_example()
tables = clean_tables.copy()
tables.nodes.clear()
for j, n in enumerate(clean_tables.nodes):
tables.nodes.add_row(
time=n.time, flags=n.flags,
population=j,
individual=n.individual,
metadata=n.metadata)
with pytest.raises(ValueError):
pyslim.annotate_defaults_tables(tables, model_type='nonWF', slim_generation=1)
ts = tables.tree_sequence()
with pytest.raises(ValueError):
_ = pyslim.SlimTreeSequence(ts)
def test_inconsistent_times(self):
clean_tables = self.clean_example()
tables = clean_tables.copy()
tables.nodes.clear()
for j, n in enumerate(clean_tables.nodes):
tables.nodes.add_row(time=j, flags=tskit.NODE_IS_SAMPLE, population=1, individual=0)
ts = tables.tree_sequence()
with pytest.raises(ValueError):
_ = pyslim.SlimTreeSequence(ts)
def test_bad_metadata(self):
clean_tables = self.clean_example()
tables = clean_tables.copy()
tables.metadata_schema = tskit.MetadataSchema({"type": "object", "codec": "json"})
tables.metadata = {}
ts = tables.tree_sequence()
with pytest.raises(ValueError):
_ = pyslim.SlimTreeSequence(ts)
# tmp_path is a pytest fixture, and is a pathlib.Path object
def test_slim_generation(self, tmp_path):
# tests around awkward backwards-compatible patch for setting slim_generation
ts = self.get_slim_example(name="recipe_nonWF")
assert ts.slim_generation == ts.metadata['SLiM']['generation']
new_sg = 12345
ts.slim_generation = new_sg
assert ts.slim_generation == new_sg
# check persists through dump/load
temp_file = tmp_path / "temp.trees"
ts.dump(str(temp_file))
loaded_ts = pyslim.load(str(temp_file))
assert loaded_ts.slim_generation == new_sg
assert loaded_ts.metadata['SLiM']['generation'] == new_sg
# check persists through recapitate
recap = ts.recapitate(recombination_rate=1e-8)
assert recap.slim_generation == new_sg
# check persists through simplify
simp = ts.simplify(ts.samples())
assert simp.slim_generation == new_sg
class TestSlimTime(tests.PyslimTestCase):
# Tests for slim_time()
def test_slim_time(self):
for ts in self.get_slim_examples(init_mutated=False):
for mut in ts.mutations():
mut_time = max([x['slim_time'] for x in mut.metadata['mutation_list']])
assert mut_time == ts.slim_time(mut.time)
# the "init_mutated" examples have mutations that are *added*
# in *early*, and so their times match in that stage.
for ts in self.get_slim_examples(init_mutated=True):
for mut in ts.mutations():
mut_time = max([x['slim_time'] for x in mut.metadata['mutation_list']])
assert mut_time == ts.slim_time(mut.time, stage="early")
class TestMutate(tests.PyslimTestCase):
# Tests for making a tree sequence a SlimTreeSequence
# again after msprime.mutate.
def test_mutate(self):
for ts in self.get_slim_examples():
mts = msprime.mutate(ts, rate=1e-8, random_seed=5)
pts = pyslim.SlimTreeSequence(mts)
assert ts.metadata == pts.metadata
class TestRecapitate(tests.PyslimTestCase):
'''
Tests for recapitation.
'''
def check_recap_consistency(self, ts, recap):
assert ts.slim_generation == recap.slim_generation
assert all(tree.num_roots == 1 for tree in recap.trees())
ts_samples = list(ts.samples())
for u in recap.samples():
n1 = recap.node(u)
assert n1.individual >= 0
i1 = recap.individual(n1.individual)
remembered = ((pyslim.INDIVIDUAL_REMEMBERED & i1.flags) > 0)
alive = ((pyslim.INDIVIDUAL_ALIVE & i1.flags) > 0)
assert alive or remembered
assert u in ts_samples
n2 = ts.node(u)
assert n1.time == n2.time
assert n1.individual == n2.individual
assert n1.flags == n2.flags
assert n1.metadata == n2.metadata
assert n1.population == n2.population
assert ts.num_populations <= recap.num_populations
for k in range(ts.num_populations):
p1 = ts.population(k)
p2 = recap.population(k)
assert p1.metadata == p2.metadata
def test_recapitate_errors(self):
ts = next(self.get_slim_examples())
with pytest.raises(ValueError):
_ = ts.recapitate(
recombination_rate=0.0,
keep_first_generation=True)
def test_recapitation(self):
for ts in self.get_slim_examples():
if ts.num_populations <= 2:
# if not we need migration rates
recomb_rate = 1.0 / ts.sequence_length
recap = ts.recapitate(recombination_rate=recomb_rate)
# there should be no new mutations
assert ts.num_mutations == recap.num_mutations
assert ts.num_sites == recap.num_sites
assert list(ts.tables.sites.position) == list(recap.tables.sites.position)
self.check_recap_consistency(ts, recap)
for t in recap.trees():
assert t.num_roots == 1
recap = ts.recapitate(recombination_rate=recomb_rate, Ne=1e-6)
self.check_recap_consistency(ts, recap)
if ts.slim_generation < 200:
for t in recap.trees():
assert t.num_roots == 1
assert abs(recap.node(t.root).time - recap.slim_generation) < 1e-4
class TestIndividualMetadata(tests.PyslimTestCase):
# Tests for extra stuff related to Individuals.
def test_individual_derived_info(self):
for ts in self.get_slim_examples():
for ind in ts.individuals():
for n in ind.nodes:
assert ts.node(n).population == ind.population
assert ts.node(n).time == ind.time
def test_individual_embellishments(self):
# Test the individual additional information.
for ts in self.get_slim_examples():
is_wf = (ts.metadata["SLiM"]["model_type"] == "WF")
for j, ind in enumerate(ts.individuals()):
assert ts.individual_times[j] == ind.time
if is_wf:
assert ts.individual_ages[j] == 0
else:
assert ts.individual_ages[j] == ind.metadata["age"]
assert ts.individual_populations[j] == ind.population
assert np.array_equal(ts.individual_locations[j], ind.location)
def test_first_gen_nodes(self):
# check that all the roots of the trees are present
for ts in self.get_slim_examples():
root_time = ts.slim_generation
if (ts.metadata['SLiM']['stage'] == 'early'
or ts.metadata['SLiM']['model_type'] == 'nonWF'):
root_time -= 1
for t in ts.trees():
for u in t.roots:
assert ts.node(u).time == root_time
class TestMutationMetadata(tests.PyslimTestCase):
'''
Tests for extra stuff related to Mutations.
'''
def test_slim_time(self):
# check that slim_times make sense
for ts in self.get_slim_examples(init_mutated=False):
# Mutation's slim_times are one less than the corresponding node's slim times
# in WF models, but not in nonWF models, for some reason.
is_wf = (ts.metadata["SLiM"]["model_type"] == "WF")
for mut in ts.mutations():
node_slim_time = ts.slim_generation - ts.node(mut.node).time
mut_slim_time = max([u["slim_time"] for u in mut.metadata["mutation_list"]])
assert node_slim_time >= mut_slim_time
class TestIndividualAges(tests.PyslimTestCase):
# tests for individuals_alive_at and individual_ages_at
def test_errors(self):
ts = next(self.get_slim_examples(everyone=True))
for stage in ['abcd', 10, []]:
with pytest.raises(ValueError):
ts.individuals_alive_at(0, stage=stage)
with pytest.raises(ValueError):
ts.individuals_alive_at(0, remembered_stage=stage)
with pytest.raises(ValueError):
ts.individual_ages_at(0, stage=stage)
def test_mismatched_remembered_stage(self):
for ts, ex in self.get_slim_examples(pedigree=True, WF=True, return_info=True):
info = ex['info']
if "remembered_early" in ex:
with pytest.warns(UserWarning):
ts.individuals_alive_at(0, remembered_stage="late")
else:
with pytest.warns(UserWarning):
ts.individuals_alive_at(0, remembered_stage="early")
def test_population(self):
for ts in self.get_slim_examples(multipop=True, remembered_early=False):
all_inds = ts.individuals_alive_at(0)
for p in range(ts.num_populations):
sub_inds = ts.individuals_alive_at(0, population=p)
assert set(sub_inds) == set(all_inds[ts.individual_populations == p])
sub_inds = ts.individuals_alive_at(0, population=[p])
assert set(sub_inds) == set(all_inds[ts.individual_populations == p])
sub_inds = ts.individuals_alive_at(0, population=np.arange(p))
assert set(sub_inds) == set(all_inds[ts.individual_populations != p])
def test_samples_only(self):
for ts in self.get_slim_examples(nonWF=True, remembered_early=False):
all_inds = ts.individuals_alive_at(0)
assert set(all_inds) == set(ts.individuals_alive_at(0, samples_only=False))
sub_inds = np.random.choice(all_inds, size=min(len(all_inds), 4), replace=False)
flags = np.array([n.flags & (tskit.NODE_IS_SAMPLE * (n.individual in sub_inds))
for n in ts.nodes()], dtype=np.uint32)
tables = ts.tables
tables.nodes.flags = flags
new_ts = pyslim.SlimTreeSequence(tables.tree_sequence())
assert set(sub_inds) == set(new_ts.individuals_alive_at(0, samples_only=True))
def test_after_simplify(self):
for ts in self.get_slim_examples(remembered_early=False):
sts = ts.simplify()
orig_inds = ts.individuals_alive_at(0)
simp_inds = sts.individuals_alive_at(0)
odict = {ts.individual(i).metadata["pedigree_id"]: i for i in orig_inds}
sdict = {sts.individual(i).metadata["pedigree_id"]: i for i in simp_inds}
for slim_id in odict:
i = odict[slim_id]
ind = ts.individual(i)
n = ts.node(ind.nodes[0])
if n.flags & tskit.NODE_IS_SAMPLE:
assert slim_id in sdict
def test_ages(self):
for ts, ex in self.get_slim_examples(pedigree=True, return_info=True):
info = ex['info']
remembered_stage = 'early' if 'remembered_early' in ex else 'late'
assert remembered_stage == ts.metadata['SLiM']['stage']
max_time_ago = ts.slim_generation
if remembered_stage == 'early':
max_time_ago -= 1
for time in range(0, max_time_ago):
# if written out during 'early' in a WF model,
# tskit time 0 will be the SLiM time step *before* slim_generation
slim_time = ts.slim_generation - time
if remembered_stage == 'early' and ts.metadata["SLiM"]["model_type"] == "WF":
slim_time -= 1
if remembered_stage == 'early' and time == 0:
# if we remember in early we don't know who's still there
# in late of the last time step
check_stages = ('early',)
else:
check_stages = ('early', 'late')
for stage in check_stages:
alive = ts.individuals_alive_at(
time,
stage=stage,
remembered_stage=remembered_stage)
ages = ts.individual_ages_at(
time,
stage=stage,
remembered_stage=remembered_stage)
for ind in ts.individuals():
if 'everyone' in ex or ind.time == 0:
slim_id = ind.metadata["pedigree_id"]
assert slim_id in info
slim_alive = (slim_time, stage) in info[slim_id]['age']
pyslim_alive = ind.id in alive
print(time, (slim_time, stage))
print(ind)
print(info[slim_id])
print(slim_alive, pyslim_alive)
assert slim_alive == pyslim_alive
if slim_alive:
slim_age = info[slim_id]['age'][(slim_time, stage)]
if ts.metadata["SLiM"]["model_type"] == "WF":
# SLiM records -1 but we return 0 in late and 1 in early
slim_age = 0 + (stage == 'early')
print('age:', ages[ind.id], slim_age)
assert ages[ind.id] == slim_age
else:
assert np.isnan(ages[ind.id])
class TestHasIndividualParents(tests.PyslimTestCase):
def verify_has_parents(self, ts):
right_answer = np.repeat(True, ts.num_individuals)
node_indivs = ts.tables.nodes.individual
parent_ids = [set() for _ in ts.individuals()]
node_parent_ids = [set() for _ in ts.nodes()]
for t in ts.trees():
for i in ts.individuals():
if len(i.nodes) != 2:
right_answer[i.id] = False
for n in i.nodes:
pn = t.parent(n)
if pn == tskit.NULL:
right_answer[i.id] = False
else:
p = node_indivs[t.parent(n)]
if p == tskit.NULL:
right_answer[i.id] = False
else:
ptime = ts.individual_times[p]
parent_alive = True
if ts.metadata["SLiM"]["model_type"] == "WF":
if i.time + 1 != ptime:
parent_alive = False
else:
pdeath = ptime - ts.individual_ages[p]
if i.time + 1 < pdeath:
parent_alive = False
if not parent_alive:
right_answer[i.id] = False
else:
parent_ids[i.id].add(p)
node_parent_ids[n].add(p)
for j, p in enumerate(parent_ids):
if len(p) == 0:
right_answer[j] = False
for j, p in enumerate(node_parent_ids):
if len(p) != 1:
ind = ts.node(j).individual
if ind != tskit.NULL:
right_answer[ts.node(j).individual] = False
right_parents = []
for j, p in enumerate(parent_ids):
if right_answer[j]:
for pp in p:
right_parents.append([pp, j])
has_parents = ts.has_individual_parents()
right_parents = np.sort(np.array(right_parents), axis=0)
parents = np.sort(ts.individual_parents(), axis=0)
assert np.array_equal(right_answer, has_parents)
print("right:", right_parents)
print("pyslim:", parents)
assert np.array_equal(right_parents, parents)
def get_first_gen(self, ts):
root_time = ts.metadata["SLiM"]["generation"]
if ts.metadata['SLiM']['model_type'] != 'WF' or ts.metadata['SLiM']['stage'] != 'late':
root_time -= 1
first_gen = set(ts.tables.nodes.individual[ts.tables.nodes.time == root_time])
first_gen.discard(tskit.NULL)
return np.array(list(first_gen), dtype='int')
def test_everyone(self):
# since everyone is recorded, only the initial individuals should
# not have parents
for ts in self.get_slim_examples(everyone=True):
right_answer = np.repeat(True, ts.num_individuals)
first_gen = self.get_first_gen(ts)
right_answer[first_gen] = False
has_parents = ts.has_individual_parents()
assert np.array_equal(right_answer, has_parents)
self.verify_has_parents(ts)
def test_post_recap(self):
# the same should be true after recapitation
for ts in self.get_slim_examples(everyone=True):
right_answer = np.repeat(True, ts.num_individuals)
first_gen = self.get_first_gen(ts)
right_answer[first_gen] = False
assert(ts.num_populations <= 2)
num_individuals_before = ts.num_individuals
ts = ts.recapitate(recombination_rate=0.01)
assert(ts.num_individuals == num_individuals_before)
has_parents = ts.has_individual_parents()
assert np.array_equal(right_answer, has_parents)
self.verify_has_parents(ts)
def test_post_simplify(self):
for ts in self.get_slim_examples(everyone=True):
keep_indivs = np.random.choice(
np.where(ts.individual_times < ts.slim_generation - 1)[0],
size=30, replace=False)
keep_nodes = []
for i in keep_indivs:
keep_nodes.extend(ts.individual(i).nodes)
ts = ts.simplify(samples=keep_nodes, filter_individuals=True)
assert(ts.num_populations <= 2)
ts = ts.recapitate(recombination_rate=0.01)
has_parents = ts.has_individual_parents()
assert sum(has_parents) > 0
self.verify_has_parents(ts)
def test_pedigree_has_parents(self):
for ts, ex in self.get_slim_examples(pedigree=True, return_info=True):
has_parents = ts.has_individual_parents()
info = ex['info']
slim_map = {}
for ind in ts.individuals():
slim_map[ind.metadata["pedigree_id"]] = ind.id
for hasp, ind in zip(has_parents, ts.individuals()):
slim_parents = info[ind.metadata["pedigree_id"]]['parents']
slim_hasp = len(slim_parents) > 0
for p in slim_parents:
if p not in slim_map:
slim_hasp = False
assert hasp == slim_hasp
def test_pedigree_parents(self):
for ts, ex in self.get_slim_examples(pedigree=True, return_info=True):
has_parents = ts.has_individual_parents()
parents = ts.individual_parents()
info = ex['info']
slim_map = {}
for ind in ts.individuals():
slim_map[ind.metadata["pedigree_id"]] = ind.id
ts_to_slim = {sid: [] for sid in slim_map}
for (pa, ch) in parents:
assert pa >= 0 and pa < ts.num_individuals
assert ch >= 0 and ch < ts.num_individuals
pa_ind = ts.individual(pa).metadata["pedigree_id"]
ch_ind = ts.individual(ch).metadata["pedigree_id"]
ts_to_slim[ch_ind].append(pa_ind)
for ind in ts.individuals():
sid = ind.metadata["pedigree_id"]
a = ts_to_slim[sid]
b = [x for x in info[sid]["parents"] if x in slim_map]
if len(b) == 2:
assert set(a) == set(b)
else:
assert a == []
class TestSimplify(tests.PyslimTestCase):
'''
Our simplify() is just a wrapper around the tskit simplify.
'''
def test_simplify(self):
for ts in self.get_slim_examples():
sts = ts.simplify(map_nodes=False)
assert ts.sequence_length == sts.sequence_length
assert type(ts) == type(sts)
assert sts.samples()[0] == 0
sts, _ = ts.simplify(map_nodes=True)
assert ts.sequence_length == sts.sequence_length
assert type(ts) == type(sts)
assert sts.samples()[0] == 0
class TestReferenceSequence(tests.PyslimTestCase):
'''
Test for operations involving the reference sequence
'''
def test_reference_sequence(self):
for ts in self.get_slim_examples():
if ts.num_mutations > 0:
mut_md = ts.mutation(0).metadata
has_nucleotides = (mut_md["mutation_list"][0]["nucleotide"] >= 0)
if not has_nucleotides:
assert ts.reference_sequence is None
else:
assert type(ts.reference_sequence) == type('')
assert len(ts.reference_sequence) == ts.sequence_length
for u in ts.reference_sequence:
assert u in pyslim.NUCLEOTIDES
sts = ts.simplify(ts.samples()[:2])
assert sts.reference_sequence == ts.reference_sequence
def test_mutation_at_errors(self):
for ts in self.get_slim_examples():
u = ts.samples()[0]
with pytest.raises(ValueError):
ts.mutation_at(-2, 3)
with pytest.raises(ValueError):
ts.mutation_at(u, -3)
with pytest.raises(ValueError):
ts.mutation_at(ts.num_nodes + 2, 3)
with pytest.raises(ValueError):
ts.mutation_at(u, ts.sequence_length)
def test_nucleotide_at_errors(self):
for ts in self.get_slim_examples():
u = ts.samples()[0]
if ts.num_mutations > 0:
mut_md = ts.mutation(0).metadata
has_nucleotides = (mut_md["mutation_list"][0]["nucleotide"] >= 0)
if not has_nucleotides:
with pytest.raises(ValueError):
ts.nucleotide_at(u, 3)
def test_mutation_at(self):
random.seed(42)
for ts in self.get_slim_examples():
for _ in range(100):
node = random.randint(0, ts.num_nodes - 1)
pos = random.randint(0, ts.sequence_length - 1)
tree = ts.at(pos)
parent = tree.parent(node)
a = ts.mutation_at(node, pos)
if parent == tskit.NULL:
assert a == tskit.NULL
else:
b = ts.mutation_at(parent, pos)
c = ts.mutation_at(node, pos, ts.node(parent).time)
assert b == c
for k in np.where(node == ts.tables.mutations.node)[0]:
mut = ts.mutation(k)
if ts.site(mut.site).position == pos:
b = mut.id
assert a == b
def test_nucleotide_at(self):
random.seed(42)
for ts in self.get_slim_examples():
if ts.num_mutations > 0:
mut_md = ts.mutation(0).metadata
has_nucleotides = (mut_md["mutation_list"][0]["nucleotide"] >= 0)
if has_nucleotides:
for _ in range(100):
node = random.randint(0, ts.num_nodes - 1)
pos = random.randint(0, ts.sequence_length - 1)
tree = ts.at(pos)
parent = tree.parent(node)
a = ts.nucleotide_at(node, pos)
if parent == tskit.NULL:
nuc = ts.reference_sequence[int(pos)]
assert a == pyslim.NUCLEOTIDES.index(nuc)
else:
b = ts.nucleotide_at(parent, pos)
c = ts.nucleotide_at(node, pos, ts.node(parent).time)
assert b == c
for k in np.where(node == ts.tables.mutations.node)[0]:
mut = ts.mutation(k)
if ts.site(mut.site).position == pos:
b = mut.metadata["mutation_list"][0]["nucleotide"]
assert a == b
class TestDeprecations(tests.PyslimTestCase):
def test_first_gen(self):
ts = next(self.get_slim_examples())
with pytest.warns(FutureWarning):
_ = ts.first_generation_individuals()
```
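A compact sketch of the pyslim workflow these tests exercise; the file name is a placeholder, but the calls themselves all appear in the tests above:

```python
# Hypothetical pyslim workflow: load a SLiM tree sequence, recapitate it,
# and query the individuals alive at the time the tree sequence was written.
import pyslim

ts = pyslim.load("example.trees")  # placeholder path to a SLiM .trees file
recap = ts.recapitate(recombination_rate=1e-8, Ne=1000)
alive_now = recap.individuals_alive_at(0)
print(len(alive_now), "individuals alive at time 0")
```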
|
{
"source": "jelliotthiggins/tskit",
"score": 2
}
|
#### File: python/tests/test_fileobj.py
```python
import io
import multiprocessing
import os
import pathlib
import platform
import queue
import shutil
import socket
import socketserver
import tempfile
import traceback
import pytest
from pytest import fixture
import tskit
IS_WINDOWS = platform.system() == "Windows"
IS_OSX = platform.system() == "Darwin"
class TestPath:
@fixture
def tempfile_name(self):
with tempfile.TemporaryDirectory() as tmp_dir:
yield f"{tmp_dir}/plain_path"
def test_pathlib(self, ts_fixture, tempfile_name):
ts_fixture.dump(tempfile_name)
ts2 = tskit.load(tempfile_name)
assert ts_fixture.tables == ts2.tables
class TestPathLib:
@fixture
def pathlib_tempfile(self):
fd, path = tempfile.mkstemp(prefix="tskit_test_pathlib")
os.close(fd)
temp_file = pathlib.Path(path)
yield temp_file
temp_file.unlink()
def test_pathlib(self, ts_fixture, pathlib_tempfile):
ts_fixture.dump(pathlib_tempfile)
ts2 = tskit.load(pathlib_tempfile)
assert ts_fixture.tables == ts2.tables
class TestFileObj:
@fixture
def fileobj(self):
with tempfile.TemporaryDirectory() as tmp_dir:
with open(f"{tmp_dir}/fileobj", "wb") as f:
yield f
def test_fileobj(self, ts_fixture, fileobj):
ts_fixture.dump(fileobj)
fileobj.close()
ts2 = tskit.load(fileobj.name)
assert ts_fixture.tables == ts2.tables
def test_fileobj_multi(self, replicate_ts_fixture, fileobj):
file_offsets = []
for ts in replicate_ts_fixture:
ts.dump(fileobj)
file_offsets.append(fileobj.tell())
fileobj.close()
with open(fileobj.name, "rb") as f:
for ts, file_offset in zip(replicate_ts_fixture, file_offsets):
ts2 = tskit.load(f)
file_offset2 = f.tell()
assert ts.tables == ts2.tables
assert file_offset == file_offset2
class TestFileObjRW:
@fixture
def fileobj(self):
with tempfile.TemporaryDirectory() as tmp_dir:
pathlib.Path(f"{tmp_dir}/fileobj").touch()
with open(f"{tmp_dir}/fileobj", "r+b") as f:
yield f
def test_fileobj(self, ts_fixture, fileobj):
ts_fixture.dump(fileobj)
fileobj.seek(0)
ts2 = tskit.load(fileobj)
assert ts_fixture.tables == ts2.tables
def test_fileobj_multi(self, replicate_ts_fixture, fileobj):
file_offsets = []
for ts in replicate_ts_fixture:
ts.dump(fileobj)
file_offsets.append(fileobj.tell())
fileobj.seek(0)
for ts, file_offset in zip(replicate_ts_fixture, file_offsets):
ts2 = tskit.load(fileobj)
file_offset2 = fileobj.tell()
assert ts.tables == ts2.tables
assert file_offset == file_offset2
class TestFD:
@fixture
def fd(self):
with tempfile.TemporaryDirectory() as tmp_dir:
pathlib.Path(f"{tmp_dir}/fd").touch()
with open(f"{tmp_dir}/fd", "r+b") as f:
yield f.fileno()
def test_fd(self, ts_fixture, fd):
ts_fixture.dump(fd)
os.lseek(fd, 0, os.SEEK_SET)
ts2 = tskit.load(fd)
assert ts_fixture.tables == ts2.tables
def test_fd_multi(self, replicate_ts_fixture, fd):
for ts in replicate_ts_fixture:
ts.dump(fd)
os.lseek(fd, 0, os.SEEK_SET)
for ts in replicate_ts_fixture:
ts2 = tskit.load(fd)
assert ts.tables == ts2.tables
class TestUnsupportedObjects:
def test_string_io(self, ts_fixture):
with pytest.raises(io.UnsupportedOperation, match=r"fileno"):
ts_fixture.dump(io.StringIO())
with pytest.raises(io.UnsupportedOperation, match=r"fileno"):
tskit.load(io.StringIO())
with pytest.raises(io.UnsupportedOperation, match=r"fileno"):
ts_fixture.dump(io.BytesIO())
with pytest.raises(io.UnsupportedOperation, match=r"fileno"):
tskit.load(io.BytesIO())
def dump_to_stream(q_err, q_in, file_out):
"""
Get tree sequences from `q_in` and ts.dump() them to `file_out`.
Uncaught exceptions are placed onto the `q_err` queue.
"""
try:
with open(file_out, "wb") as f:
while True:
ts = q_in.get()
if ts is None:
break
ts.dump(f)
except Exception as exc:
tb = traceback.format_exc()
q_err.put((exc, tb))
def load_from_stream(q_err, q_out, file_in):
"""
tskit.load() tree sequences from `file_in` and put them onto `q_out`.
Uncaught exceptions are placed onto the `q_err` queue.
"""
try:
with open(file_in, "rb") as f:
while True:
try:
ts = tskit.load(f)
except EOFError:
break
q_out.put(ts)
except Exception as exc:
tb = traceback.format_exc()
q_err.put((exc, tb))
def stream(fifo, ts_list):
"""
data -> q_in -> ts.dump(fifo) -> tskit.load(fifo) -> q_out -> data_out
"""
q_err = multiprocessing.Queue()
q_in = multiprocessing.Queue()
q_out = multiprocessing.Queue()
proc1 = multiprocessing.Process(target=dump_to_stream, args=(q_err, q_in, fifo))
proc2 = multiprocessing.Process(target=load_from_stream, args=(q_err, q_out, fifo))
proc1.start()
proc2.start()
for data in ts_list:
q_in.put(data)
q_in.put(None) # signal the process that we're done
proc1.join(timeout=3)
if not q_err.empty():
# re-raise the first child exception
exc, tb = q_err.get()
print(tb)
raise exc
if proc1.is_alive():
# prevent hang if proc1 failed to join
proc1.terminate()
proc2.terminate()
raise RuntimeError("proc1 (ts.dump) failed to join")
ts_list_out = []
for _ in ts_list:
try:
data_out = q_out.get(timeout=3)
except queue.Empty:
# terminate proc2 so we don't hang
proc2.terminate()
raise
ts_list_out.append(data_out)
proc2.join(timeout=3)
if proc2.is_alive():
# prevent hang if proc2 failed to join
proc2.terminate()
raise RuntimeError("proc2 (tskit.load) failed to join")
assert len(ts_list) == len(ts_list_out)
for ts, ts_out in zip(ts_list, ts_list_out):
assert ts.tables == ts_out.tables
@pytest.mark.skipif(IS_WINDOWS, reason="No FIFOs on Windows")
class TestFIFO:
@fixture
def fifo(self):
temp_dir = tempfile.mkdtemp(prefix="tsk_test_streaming")
temp_fifo = os.path.join(temp_dir, "fifo")
os.mkfifo(temp_fifo)
yield temp_fifo
shutil.rmtree(temp_dir)
def test_single_stream(self, fifo, ts_fixture):
stream(fifo, [ts_fixture])
def test_multi_stream(self, fifo, replicate_ts_fixture):
stream(fifo, replicate_ts_fixture)
ADDRESS = ("localhost", 10009)
class Server(socketserver.ThreadingTCPServer):
allow_reuse_address = True
class StoreEchoHandler(socketserver.BaseRequestHandler):
def handle(self):
while True:
try:
ts = tskit.load(self.request.fileno())
except EOFError:
break
ts.dump(self.request.fileno())
self.server.shutdown()
def server_process(q):
server = Server(ADDRESS, StoreEchoHandler)
# Tell the client (on the other end of the queue) that it's OK to open
# a connection
q.put(None)
server.serve_forever()
@pytest.mark.skipif(IS_WINDOWS or IS_OSX, reason="Errors on systems without proper fds")
class TestSocket:
@fixture
def client_fd(self):
# Use a queue to synchronise the startup of the server and the client.
q = multiprocessing.Queue()
_server_process = multiprocessing.Process(target=server_process, args=(q,))
_server_process.start()
q.get(timeout=3)
client = socket.create_connection(ADDRESS)
yield client.fileno()
client.close()
_server_process.join(timeout=3)
def verify_stream(self, ts_list, client_fd):
for ts in ts_list:
ts.dump(client_fd)
echo_ts = tskit.load(client_fd)
assert ts.tables == echo_ts.tables
def test_single_then_multi(self, ts_fixture, replicate_ts_fixture, client_fd):
self.verify_stream([ts_fixture], client_fd)
self.verify_stream(replicate_ts_fixture, client_fd)
```
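A minimal round-trip sketch of what these fixtures test, assuming msprime is available to simulate a small tree sequence:

```python
# Hypothetical round trip: dump a tree sequence to an open binary file object
# and load it back, as the TestFileObjRW cases above do.
import tempfile

import msprime
import tskit

ts = msprime.sim_ancestry(samples=5, sequence_length=100, random_seed=1)
with tempfile.TemporaryFile() as f:  # real file, so it has a fileno()
    ts.dump(f)
    f.seek(0)
    ts2 = tskit.load(f)
assert ts.tables == ts2.tables
```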
|
{
"source": "jellis18/enterprise_extensions",
"score": 2
}
|
#### File: enterprise_extensions/enterprise_extensions/hypermodel.py
```python
from __future__ import (absolute_import, division,
print_function)
import numpy as np
import scipy.stats as scistats
import scipy.linalg as sl
from enterprise import constants as const
from enterprise.signals import signal_base
try:
import cPickle as pickle
except:
import pickle
from enterprise.pulsar import Pulsar
from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc
from .sampler import JumpProposal, get_parameter_groups
class HyperModel(object):
"""
Class to define hyper-model that is the concatenation of all models.
"""
def __init__(self, models, log_weights=None):
self.models = models
self.num_models = len(self.models)
self.log_weights = log_weights
#########
self.param_names, ind = np.unique(np.concatenate([p.param_names
for p in self.models.values()]),
return_index=True)
self.param_names = self.param_names[np.argsort(ind)]
self.param_names = np.append(self.param_names, 'nmodel').tolist()
#########
#########
self.params = [p for p in self.models[0].params] # start of param list
uniq_params = [str(p) for p in self.models[0].params] # which params are unique
for model in self.models.values():
# find differences between next model and concatenation of previous
param_diffs = np.setdiff1d([str(p) for p in model.params], uniq_params)
mask = np.array([str(p) in param_diffs for p in model.params])
# concatenate for next loop iteration
uniq_params = np.union1d([str(p) for p in model.params], uniq_params)
# extend list of unique parameters
self.params.extend([pp for pp in np.array(model.params)[mask]])
#########
#########
# get signal collections
self.snames = dict.fromkeys(np.unique(sum(sum([[[qq.signal_name for qq in pp._signals]
for pp in self.models[mm]._signalcollections]
for mm in self.models], []), [])))
for key in self.snames: self.snames[key] = []
for mm in self.models:
for sc in self.models[mm]._signalcollections:
for signal in sc._signals:
self.snames[signal.signal_name].extend(signal.params)
for key in self.snames: self.snames[key] = list(set(self.snames[key]))
for key in self.snames:
uniq_params, ind = np.unique([p.name for p in self.snames[key]],
return_index=True)
uniq_params = uniq_params[np.argsort(ind)].tolist()
all_params = [p.name for p in self.snames[key]]
self.snames[key] = np.array(self.snames[key])[[all_params.index(q)
for q in uniq_params]].tolist()
#########
def get_lnlikelihood(self, x):
# find model index variable
idx = list(self.param_names).index('nmodel')
nmodel = int(np.rint(x[idx]))
# find parameters of active model
q = []
for par in self.models[nmodel].param_names:
idx = self.param_names.index(par)
q.append(x[idx])
# only active parameters enter likelihood
active_lnlike = self.models[nmodel].get_lnlikelihood(q)
if self.log_weights is not None:
active_lnlike += self.log_weights[nmodel]
return active_lnlike
def get_lnprior(self, x):
# find model index variable
idx = list(self.param_names).index('nmodel')
nmodel = int(np.rint(x[idx]))
if nmodel not in self.models.keys():
return -np.inf
else:
lnP = 0
for p in self.models.values():
q = []
for par in p.param_names:
idx = self.param_names.index(par)
q.append(x[idx])
lnP += p.get_lnprior(np.array(q))
return lnP
def get_parameter_groups(self):
groups = []
for p in self.models.values():
groups.extend(get_parameter_groups(p))
list(np.unique(groups))
groups.extend([[len(self.param_names)-1]]) # nmodel
return groups
def initial_sample(self):
"""
Draw an initial sample from within the hyper-model prior space.
"""
x0 = [np.array(p.sample()).ravel().tolist() for p in self.models[0].params]
uniq_params = [str(p) for p in self.models[0].params]
for model in self.models.values():
param_diffs = np.setdiff1d([str(p) for p in model.params], uniq_params)
mask = np.array([str(p) in param_diffs for p in model.params])
x0.extend([np.array(pp.sample()).ravel().tolist() for pp in np.array(model.params)[mask]])
uniq_params = np.union1d([str(p) for p in model.params], uniq_params)
x0.extend([[0.1]])
return np.array([p for sublist in x0 for p in sublist])
def draw_from_nmodel_prior(self, x, iter, beta):
"""
Model-index uniform distribution prior draw.
"""
q = x.copy()
idx = list(self.param_names).index('nmodel')
q[idx] = np.random.uniform(-0.5,self.num_models-0.5)
lqxy = 0
return q, float(lqxy)
def setup_sampler(self, outdir='chains', resume=False, sample_nmodel=True,
empirical_distr=None, groups=None):
"""
Sets up an instance of PTMCMC sampler.
        We initialize the sampler with the likelihood and prior functions
        from the PTA object. We set up an initial jump covariance matrix
        with fairly small jumps, as this will be adapted as the MCMC runs.
        We will set up an output directory in `outdir` that will contain
        the chain (the first n columns are the samples for the n parameters,
        and the last 4 are log-posterior, log-likelihood, acceptance rate, and
        an indicator variable for parallel tempering, which can be ignored
        since we are not using parallel tempering here).
We then add several custom jump proposals to the mix based on
whether or not certain parameters are in the model. These are
all either draws from the prior distribution of parameters or
draws from uniform distributions.
"""
# dimension of parameter space
ndim = len(self.param_names)
# initial jump covariance matrix
cov = np.diag(np.ones(ndim) * 1**2) ## used to be 0.1
# parameter groupings
if groups is None:
groups = self.get_parameter_groups()
sampler = ptmcmc(ndim, self.get_lnlikelihood, self.get_lnprior, cov,
groups=groups, outDir=outdir, resume=resume)
np.savetxt(outdir+'/pars.txt', self.param_names, fmt='%s')
np.savetxt(outdir+'/priors.txt', self.params, fmt='%s')
# additional jump proposals
jp = JumpProposal(self, self.snames, empirical_distr=empirical_distr)
# always add draw from prior
sampler.addProposalToCycle(jp.draw_from_prior, 5)
# try adding empirical proposals
if empirical_distr is not None:
print('Adding empirical proposals...\n')
sampler.addProposalToCycle(jp.draw_from_empirical_distr, 25)
# Red noise prior draw
if 'red noise' in self.snames:
print('Adding red noise prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_red_prior, 10)
# DM GP noise prior draw
if 'dm_gp' in self.snames:
print('Adding DM GP noise prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dm_gp_prior, 10)
# DM annual prior draw
if 'dm_s1yr' in jp.snames:
print('Adding DM annual prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dm1yr_prior, 10)
# DM dip prior draw
if 'dmexp' in '\t'.join(jp.snames):
print('Adding DM exponential dip prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dmexpdip_prior, 10)
# DM cusp prior draw
if 'dm_cusp' in jp.snames:
print('Adding DM exponential cusp prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dmexpcusp_prior, 10)
# DMX prior draw
if 'dmx_signal' in jp.snames:
print('Adding DMX prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dmx_prior, 10)
# SW prior draw
if 'gp_sw' in jp.snames:
print('Adding Solar Wind DM GP prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dm_sw_prior, 10)
# Ephemeris prior draw
if 'd_jupiter_mass' in self.param_names:
print('Adding ephemeris model prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_ephem_prior, 10)
# GWB uniform distribution draw
if 'gw_log10_A' in self.param_names:
print('Adding GWB uniform distribution draws...\n')
sampler.addProposalToCycle(jp.draw_from_gwb_log_uniform_distribution, 10)
# Dipole uniform distribution draw
if 'dipole_log10_A' in self.param_names:
print('Adding dipole uniform distribution draws...\n')
sampler.addProposalToCycle(jp.draw_from_dipole_log_uniform_distribution, 10)
# Monopole uniform distribution draw
if 'monopole_log10_A' in self.param_names:
print('Adding monopole uniform distribution draws...\n')
sampler.addProposalToCycle(jp.draw_from_monopole_log_uniform_distribution, 10)
# BWM prior draw
if 'bwm_log10_A' in self.param_names:
print('Adding BWM prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_bwm_prior, 10)
# CW prior draw
if 'cw_log10_h' in self.param_names:
print('Adding CW prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_cw_log_uniform_distribution, 10)
# Prior distribution draw for parameters named GW
if any([str(p).split(':')[0] for p in list(self.params) if 'gw' in str(p)]):
print('Adding gw param prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_par_prior(
par_names=[str(p).split(':')[0] for
p in list(self.params)
if 'gw' in str(p)]), 10)
# Model index distribution draw
if sample_nmodel:
if 'nmodel' in self.param_names:
print('Adding nmodel uniform distribution draws...\n')
sampler.addProposalToCycle(self.draw_from_nmodel_prior, 25)
return sampler
def get_process_timeseries(self, psr, chain, burn, comp='DM',
mle=False, model=0):
"""
Construct a time series realization of various constrained processes.
        :param psr: enterprise pulsar object
:param chain: MCMC chain from sampling all models
:param burn: desired number of initial samples to discard
:param comp: which process to reconstruct? (red noise or DM) [default=DM]
:param mle: create time series from ML of GP hyper-parameters? [default=False]
:param model: which sub-model within the super-model to reconstruct from? [default=0]
:return ret: time-series of the reconstructed process
"""
wave = 0
pta = self.models[model]
model_chain = chain[np.rint(chain[:,-5])==model,:]
# get parameter dictionary
if mle:
ind = np.argmax(model_chain[:, -4])
else:
ind = np.random.randint(burn, model_chain.shape[0])
params = {par: model_chain[ind, ct]
for ct, par in enumerate(self.param_names)
if par in pta.param_names}
# deterministic signal part
wave += pta.get_delay(params=params)[0]
# get linear parameters
Nvec = pta.get_ndiag(params)[0]
phiinv = pta.get_phiinv(params, logdet=False)[0]
T = pta.get_basis(params)[0]
d = pta.get_TNr(params)[0]
TNT = pta.get_TNT(params)[0]
# Red noise piece
Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)
try:
u, s, _ = sl.svd(Sigma)
mn = np.dot(u, np.dot(u.T, d)/s)
Li = u * np.sqrt(1/s)
except np.linalg.LinAlgError:
Q, R = sl.qr(Sigma)
Sigi = sl.solve(R, Q.T)
mn = np.dot(Sigi, d)
u, s, _ = sl.svd(Sigi)
Li = u * np.sqrt(1/s)
b = mn + np.dot(Li, np.random.randn(Li.shape[0]))
# find basis indices
pardict = {}
for sc in pta._signalcollections:
ntot = 0
for sig in sc._signals:
if sig.signal_type == 'basis':
basis = sig.get_basis(params=params)
nb = basis.shape[1]
pardict[sig.signal_name] = np.arange(ntot, nb+ntot)
ntot += nb
# DM quadratic + GP
if comp == 'DM':
idx = pardict['dm_gp']
wave += np.dot(T[:,idx], b[idx])
ret = wave * (psr.freqs**2 * const.DM_K * 1e12)
elif comp == 'scattering':
idx = pardict['scattering_gp']
wave += np.dot(T[:,idx], b[idx])
ret = wave * (psr.freqs**4) # * const.DM_K * 1e12)
elif comp == 'red':
idx = pardict['red noise']
wave += np.dot(T[:,idx], b[idx])
ret = wave
elif comp == 'FD':
idx = pardict['FD']
wave += np.dot(T[:,idx], b[idx])
ret = wave
elif comp == 'all':
wave += np.dot(T, b)
ret = wave
else:
ret = wave
return ret
```
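The methods above implement product-space model selection: the `nmodel` coordinate of the parameter vector picks which sub-model's likelihood is evaluated, and `setup_sampler` wires the joint parameter space into PTMCMC with model-index jump proposals. A minimal usage sketch, assuming the enclosing class is the hyper-model container (called `HyperModel` here) built from a dict of enterprise PTA objects; the model names and iteration count are illustrative:

```python
# Hypothetical usage sketch; pta_a and pta_b stand in for enterprise PTA objects.
super_model = HyperModel(models={0: pta_a, 1: pta_b})

sampler = super_model.setup_sampler(outdir='chains/hypermodel', resume=False)

# start from a random draw inside the joint prior (last entry is the nmodel index)
x0 = super_model.initial_sample()

# run the PTMCMC sampler; posterior odds between models 0 and 1 follow from the
# fraction of post-burn-in samples with nmodel < 0.5 vs. nmodel >= 0.5
sampler.sample(x0, int(1e6))
```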
|
{
"source": "jellis18/plotutils",
"score": 3
}
|
#### File: plotutils/plotutils/bounded_kde.py
```python
import numpy as np
from scipy.special import erf
from scipy.stats import gaussian_kde
class Bounded_kde(gaussian_kde):
r"""Represents a one-dimensional Gaussian kernel density estimator
for a probability distribution function that exists on a bounded
domain."""
def __init__(self, pts, low=None, high=None, *args, **kwargs):
"""Initialize with the given bounds. Either ``low`` or
``high`` may be ``None`` if the bounds are one-sided. Extra
parameters are passed to :class:`gaussian_kde`.
:param low: The lower domain boundary.
:param high: The upper domain boundary."""
pts = np.atleast_1d(pts)
assert pts.ndim == 1, 'Bounded_kde can only be one-dimensional'
super(Bounded_kde, self).__init__(pts, *args, **kwargs)
self._low = low
self._high = high
@property
def low(self):
"""The lower bound of the domain."""
return self._low
@property
def high(self):
"""The upper bound of the domain."""
return self._high
def evaluate(self, xs):
"""Return an estimate of the density evaluated at the given
points."""
xs = np.atleast_1d(xs)
assert xs.ndim == 1, 'points must be one-dimensional'
pdf = super(Bounded_kde, self).evaluate(xs)
if self.low is not None:
pdf += super(Bounded_kde, self).evaluate(2.0*self.low - xs)
if self.high is not None:
pdf += super(Bounded_kde, self).evaluate(2.0*self.high - xs)
return pdf
__call__ = evaluate
```
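A brief usage sketch of the reflection trick implemented above; the sample data and the bound at zero are illustrative:

```python
import numpy as np

# illustrative data: samples from a distribution supported on [0, inf)
samples = np.abs(np.random.randn(5000))

kde = Bounded_kde(samples, low=0.0)

xs = np.linspace(0.0, 3.0, 200)
density = kde(xs)  # reflecting mass about x=0 removes the usual edge bias at the boundary
```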
|
{
"source": "Jelloeater/8266_web-relay",
"score": 3
}
|
#### File: Jelloeater/8266_web-relay/main.py
```python
import socket
import ure as re
import time
import machine
def run():
# Standard socket stuff:
host = ''
port = 80
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((host, port))
sock.listen(1) # don't queue up any requests
while True:
csock, caddr = sock.accept()
print("\nConnection from: " + str(caddr))
req = csock.recv(1024) # get the request, 1kB max
get_req = str(req).split('GET /')[1].split('HTTP')[0]
print('Req RAW:')
print(req)
output = parse_req(get_req)
csock.sendall("""HTTP/1.0 200 OK
Content-Type: text/html
<html>
<head>
</head>
<body>
<form action="" method="get">
<button name="pin1" value="True">P1-On</button>
</form>
<form action="" method="get">
<button name="pin1" value="False">P1-Off</button>
</form>
<br>
<form action="" method="get">
<button name="pin2" value="True">P2-On</button>
</form>
<form action="" method="get">
<button name="pin2" value="False">P2-Off</button>
</form>
<br>
OUTPUT:
{0}
</body>
</html>
""".format(str(output)))
csock.close()
def parse_req(get_req):
print('Get Req:')
print(get_req)
if 'favicon.ico' not in get_req:
get_req = get_req[1:]
data = get_req.split('=')
print(data)
return pin_logic(data)
def pin_logic(data):
import machine
if 'pin1' in data[0]:
machine.Pin(5, machine.Pin.OUT).on() if 'True' in data[1] else machine.Pin(5, machine.Pin.OUT).off()
if 'pin2' in data[0]:
machine.Pin(2, machine.Pin.OUT).on() if 'True' in data[1] else machine.Pin(2, machine.Pin.OUT).off()
try:
run()
except:
time.sleep(3)
machine.reset()
```
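For reference, a button press produces a request line such as `GET /?pin1=True HTTP/1.1`; `run()` slices out `?pin1=True `, and `parse_req` drops the leading `?` and splits on `=`, so `pin_logic` receives `['pin1', 'True ']`. A tiny sketch of the same slicing in plain CPython (the request line is illustrative):

```python
# Illustrative parsing, mirroring run() and parse_req above
raw = "GET /?pin1=True HTTP/1.1"
get_req = raw.split('GET /')[1].split('HTTP')[0]  # '?pin1=True '
data = get_req[1:].split('=')                     # ['pin1', 'True ']
assert data[0] == 'pin1' and 'True' in data[1]
```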
|
{
"source": "Jellohunter7/CalculatorPopUp",
"score": 3
}
|
#### File: Jellohunter7/CalculatorPopUp/Calculator.py
```python
import tkinter as tk
window = tk.Tk()
window.title("Calculator")
output = tk.Entry(window, width="50")
output.pack()
def button1 ():
output.insert("end", "1")
def button2 ():
output.insert("end", "2")
def button3 ():
output.insert("end", "3")
def button4 ():
output.insert("end", "4")
def button5 ():
output.insert("end", "5")
def button6 ():
output.insert("end", "6")
def button7 ():
output.insert("end", "7")
def button8 ():
output.insert("end", "8")
def button9 ():
output.insert("end", "9")
def button0 ():
output.insert("end", "0")
def buttonplus ():
output.insert("end"," + ")
def buttonminus ():
output.insert("end"," - ")
def buttontimes ():
output.insert("end"," * ")
def buttonex ():
output.insert("end"," ** ")
def buttondivide ():
output.insert("end"," / ")
def buttonequal ():
string = output.get()
answer = eval(string)
output.delete(0, "end")
output.insert("end", answer)
def buttonclear ():
output.delete(0, "end")
def deletelast ():
    output.delete(max(len(output.get()) - 1, 0), "end")  # remove only the last character
button1 = tk.Button(window, text="1", command=button1)
button1.pack(side="left")
button2 = tk.Button(window, text="2", command=button2)
button2.pack(side="left")
button3 = tk.Button(window, text="3", command=button3)
button3.pack(side="left")
button4 = tk.Button(window, text="4", command=button4)
button4.pack(side="left")
button5 = tk.Button(window, text="5", command=button5)
button5.pack(side="left")
button6 = tk.Button(window, text="6", command=button6)
button6.pack(side="left")
button7 = tk.Button(window, text="7", command=button7)
button7.pack(side="left")
button8 = tk.Button(window, text="8", command=button8)
button8.pack(side="left")
button9 = tk.Button(window, text="9", command=button9)
button9.pack(side="left")
button0 = tk.Button(window, text="0", command=button0)
button0.pack(side="left")
buttonplus = tk.Button(window, text="+", command=buttonplus)
buttonplus.pack(side="left")
buttonminus = tk.Button(window, text="-", command=buttonminus)
buttonminus.pack(side="left")
buttondivide = tk.Button(window, text="/", command=buttondivide)
buttondivide.pack(side="left")
buttontimes = tk.Button(window, text="*", command=buttontimes)
buttontimes.pack(side="left")
buttonequal = tk.Button(window, text="=", command=buttonequal)
buttonequal.pack(side="left")
buttonex = tk.Button(window, text="exponent", command=buttonex)
buttonex.pack(side="left")
buttonclear = tk.Button(window, text="clear", command=buttonclear)
buttonclear.pack(side="left")
delete = tk.Button(window, text="delete", command=deletelast)
delete.pack()
window.mainloop()
```
|
{
"source": "Jellonator/02-Text-Adventure",
"score": 3
}
|
#### File: Jellonator/02-Text-Adventure/gameenemy.py
```python
import gameutil
import random
class EnemyAction:
"""
Enemy Action
"""
def __init__(self, attackname, attackdef):
self.next_action = attackdef.get("next-action")
if self.next_action != None:
self.next_action = parse_enemy_action("anonymous-action", self.next_action)
def use(self, player, enemy):
"""
Use the enemy's attack
Parameters
----------
player: Character
The player to attack
enemy: GameEnemy
The enemy that is attacking
"""
self._game_use(player, enemy)
enemy.next_action = self.next_action
def _game_use(self, player, enemy):
"""
Do not call directly!
"""
print("Nothing to do!")
class EnemyAttack(EnemyAction):
"""
Enemy attack
"""
def __init__(self, attackname, attackdef):
super().__init__(attackname, attackdef)
self.description = attackdef.get("desc", "It attacks you")
self.description_hit = attackdef.get("desc-hit", "It hits you")
self.description_miss = attackdef.get("desc-miss", "It missed you")
self.damage = attackdef.get("damage", 1)
self.stats = attackdef.get("stat", [])
if isinstance(self.stats, str):
self.stats = [self.stats]
for i in range(len(self.stats)):
self.stats[i] = self.stats[i].lower()
self.roll = attackdef.get("roll", 1)
self.damage_type = attackdef.get("damage-type", "physical")
if self.damage_type not in ["physical", "mental"]:
print("WARNING: damage type '{}' not recognized.".format(self.damage_type))
self.damage_type = "physical"
def _game_use(self, player, enemy):
print(self.description)
input("The {} is rolling {}d6 to attack...".format(enemy.name, self.roll))
dice_attack = enemy.get_attack_roll(self.roll)
dice_attack_total = sum(dice_attack)
dice_attack_fmt = gameutil.FMT_BAD.format(' '.join((str(x) for x in dice_attack)))
dice_attack_total_fmt = gameutil.FMT_BAD.format(dice_attack_total)
print("The {} rolled [{}] = {}".format(enemy.name, dice_attack_fmt, dice_attack_total_fmt))
player_roll = player.get_defense_roll(self.damage_type)
        input("Rolling {}d6 to defend...".format(player_roll))
dice_player = gameutil.roll_dice(player_roll, 6)
dice_player_total = sum(dice_player)
dice_player_fmt = gameutil.FMT_GOOD.format(' '.join((str(x) for x in dice_player)))
dice_player_total_fmt = gameutil.FMT_GOOD.format(dice_player_total)
print("You rolled [{}] = {}".format(dice_player_fmt, dice_player_total_fmt))
if dice_attack_total > dice_player_total:
print(self.description_hit)
fmt_damage = ["{} {}".format(gameutil.FMT_BAD.format(self.damage),
gameutil.FMT_STAT.format(stat.upper())) for stat in self.stats]
print("You took {} damage!".format(gameutil.join_list_pretty(fmt_damage)))
for stat in self.stats:
playerstat = player.get_stat(stat)
playerstat.subtract(self.damage)
else:
print(self.description_miss)
class EnemyActionWait(EnemyAction):
"""
Enemy wait
"""
def __init__(self, attackname, attackdef):
super().__init__(attackname, attackdef)
self.description = attackdef.get("desc", "It attacks you")
def _game_use(self, player, enemy):
print(self.description)
def parse_enemy_action(actionname, actiondata):
atype = actiondata.get("type")
if atype == "attack":
return EnemyAttack(actionname, actiondata)
elif atype == "wait":
return EnemyActionWait(actionname, actiondata)
else:
print("Unknown enemy attack type '{}'".format(atype))
return None
class GameEnemy:
"""
Game enemy
"""
def __init__(self, enemyname, enemydata):
self.shortname = enemyname
self.name = enemydata.get("name", enemyname)
self.look = enemydata.get("look", "It's a {}".format(self.name))
self.nameplural = enemydata.get("plural", self.name + "s")
self.health = gameutil.EnemyHealth(enemydata.get("health", 1))
self.description = enemydata.get("desc", "")
self.defense = enemydata.get("defense", 1)
self.attacks = []
self.next_action = None
if "actions" in enemydata:
for actionname, actiondata in enemydata["actions"].items():
action = parse_enemy_action(actionname, actiondata)
if action != None:
self.attacks.append(action)
# -1 is no curse.
# Player's attack sets curses to 1. This way, curse doesn't get removed
# for the player's next attack. Curse is also decremented *before* the
# enemy's attack so that their attack is only cursed for 1 turn.
self.curse = -1
def get_defense_value(self):
"""
Get this enemy's defense value
"""
defense = self.defense
if self.curse >= 0 and defense > 1:
defense -= 1
return defense
def get_defense_roll(self):
"""
Roll defense
Returns a list[int] of dice values
"""
return gameutil.roll_dice(self.get_defense_value(), 6)
def get_attack_roll(self, roll):
"""
Roll attack
Returns a list[int] of dice values.
"""
if self.curse >= 0 and roll > 1:
roll -= 1
return gameutil.roll_dice(roll, 6)
def is_dead(self):
"""
Returns true if the enemy is dead
"""
return self.health.value == 0
def do_turn(self, gamedata):
"""
Perform this enemy's turn
"""
self.curse = self.curse - 1
if len(self.attacks) == 0:
print("The {} can't do anything.".format(self.name))
else:
if self.next_action != None:
a = self.next_action
self.next_action = None
a.use(gamedata.player, self)
elif len(self.attacks) > 0:
atk = random.choice(self.attacks)
atk.use(gamedata.player, self)
else:
print("Does nothing")
def fmt_name(self):
return gameutil.FMT_ENEMY.format(self.name)
def __str__(self):
ret = gameutil.FMT_ENEMY.format(self.name)
if self.health.value != self.health.maxvalue:
ret += " [-{}]".format(self.health.maxvalue - self.health.value)
if self.curse > 0:
ret += " [curse {}]".format(self.curse)
return ret
```
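The core of `EnemyAttack._game_use` above is a contested roll: the enemy's attack dice total must beat the player's defense dice total, ties go to the defender, and an active curse trims one die from the attacker. A stripped-down sketch of that check, with a stand-in for `gameutil.roll_dice` (assumed here to return a list of individual die values):

```python
import random

def roll_dice(n, sides):
    # stand-in for gameutil.roll_dice: n independent dice of the given size
    return [random.randint(1, sides) for _ in range(n)]

attack_total = sum(roll_dice(3, 6))   # enemy rolling 3d6 (illustrative)
defense_total = sum(roll_dice(2, 6))  # player defense roll (illustrative)

if attack_total > defense_total:
    print("hit")    # strictly greater: ties go to the defender, as in _game_use
else:
    print("miss")
```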
|
{
"source": "jellonek/pyramid_apispec",
"score": 3
}
|
#### File: pyramid_apispec/pyramid_apispec/helpers.py
```python
A greeting endpoint.
---
x-extension: value
get:
description: get a greeting
responses:
200:
description: a pet to be returned
schema:
$ref: #/definitions/SomeFooBody
\"""
return 'hi'
@view_config(route_name='openapi_spec', renderer='json')
def api_spec(request):
spec = APISpec(
title='Some API',
version='1.0.0',
plugins=[
'apispec.ext.marshmallow'
],
)
# using marshmallow plugin here
spec.definition('SomeFooBody', schema=MarshmallowSomeFooBodySchema)
# inspect the `foo_route` and generate operations from docstring
add_pyramid_paths(spec, 'foo_route', request=request)
# inspection supports filtering via pyramid add_view predicate arguments
add_pyramid_paths(
spec, 'bar_route', request=request, request_method='post')
return spec.to_dict()
"""
from __future__ import absolute_import
from apispec.utils import load_operations_from_docstring, load_yaml_from_docstring
# py 2/3 compat
try:
    string_type = basestring  # noqa: F821 -- builtin on Python 2 only
except NameError:
    string_type = str
def is_string(val):
return isinstance(val, string_type)
ALL_METHODS = ("get", "post", "put", "patch", "delete", "head", "options")
def add_pyramid_paths(
spec, route_name, request=None, request_method=None, operations=None, **kwargs
):
"""
Adds a route and view info to spec
:param spec:
ApiSpec object
:param route_name:
Route name to inspect
:param request:
Request object, if `None` then `get_current_request()` will be used
:param request_method:
Request method predicate
:param operations:
Operations dict that will be used instead of introspection
:param kwargs:
Additional kwargs for predicate matching
:return:
"""
from pyramid.threadlocal import get_current_request
if request is None:
request = get_current_request()
registry = request.registry
introspector = registry.introspector
route = introspector.get("routes", route_name)
views = introspector.related(route)
# needs to be rewritten to internal name
if request_method:
kwargs["request_methods"] = request_method
# kwargs.setdefault('route_name', route_name)
for view in views:
matches = True
for kw in kwargs.keys():
# request_methods can be either a list of strings or a string
# so lets normalize via sets
if kw == "request_methods":
if is_string(kwargs[kw]):
kwargs[kw] = [kwargs[kw]]
methods = view.get(kw) or ALL_METHODS
if is_string(methods):
methods = [methods]
if not set(kwargs[kw] or []).intersection(methods):
matches = False
else:
if not view.get(kw) == kwargs[kw]:
matches = False
if not matches:
continue
final_operations = {}
# views can be class based
if view.get("attr"):
global_meta = load_operations_from_docstring(view["callable"].__doc__)
if global_meta:
final_operations.update(global_meta)
f_view = getattr(view["callable"], view["attr"])
# or just function callables
else:
f_view = view.get("callable")
if operations is None:
methods = view.get("request_methods")
view_operations = load_operations_from_docstring(f_view.__doc__)
if not view_operations:
view_operations = {}
if is_string(methods):
methods = [methods]
if not methods:
methods = ALL_METHODS[:]
operation = load_yaml_from_docstring(f_view.__doc__)
if operation:
for method in methods:
view_operations[method.lower()] = operation
final_operations.update(view_operations)
else:
final_operations = operations
spec.add_path(route["pattern"], operations=final_operations)
```
|
{
"source": "jelloslinger/2015-mlb-hackathon",
"score": 3
}
|
#### File: framework/model/common.py
```python
import calendar
import datetime
from math import ceil
from sqlalchemy import Column
from sqlalchemy.types import *
from datapro import IdMixin, Model
class Date(Model, IdMixin):
__schema__ = 'common'
__table_name__ = 'Date'
__table_name_mask__ = '{__table_type__}_{__table_name__}'
__table_type__ = 'DIM'
date = Column(DATE, nullable=False, unique=True)
full = Column(VARCHAR(255), nullable=False)
year = Column(SMALLINT, nullable=False)
half = Column(SMALLINT, nullable=False)
quarter = Column(SMALLINT, nullable=False)
month = Column(SMALLINT, nullable=False)
dayOfYear = Column(SMALLINT, nullable=False)
dayOfYearNoLeap = Column(SMALLINT, nullable=False)
dayOfQuarter = Column(SMALLINT, nullable=False)
yearOfWeekYear = Column(SMALLINT, nullable=False)
weekOfWeekYear = Column(SMALLINT, nullable=False)
dayOfMonth = Column(SMALLINT, nullable=False)
dayOfWeek = Column(SMALLINT, nullable=False)
yearAndHalf = Column(VARCHAR(255), nullable=False)
yearAndQuarter = Column(VARCHAR(255), nullable=False)
yearAndWeek = Column(VARCHAR(255), nullable=False)
monthNameShort = Column(VARCHAR(255), nullable=False)
monthNameLong = Column(VARCHAR(255), nullable=False)
dayOfWeekNameShort = Column(VARCHAR(255), nullable=False)
dayOfWeekNameLong = Column(VARCHAR(255), nullable=False)
yearAndMonthNameShort = Column(VARCHAR(255), nullable=False)
yearAndMonthNameLong = Column(VARCHAR(255), nullable=False)
weekend = Column(VARCHAR(255), nullable=False)
isLastDayOfMonth = Column(BOOLEAN, nullable=False)
@classmethod
def from_date(cls, d):
half = int(ceil(float(d.month) / 6.0))
quarter = int(ceil(float(d.month) / 3.0))
dayOfYear = d.timetuple().tm_yday
dayOfYearNoLeap = dayOfYear
if calendar.isleap(d.year):
if dayOfYear == 60:
dayOfYearNoLeap = 0
elif dayOfYear > 60:
dayOfYearNoLeap -= 1
dayOfQuarter = (d - datetime.date(d.year, (3 * (quarter - 1)) + 1, 1)).days + 1
iso = d.isocalendar()
isLastDayOfMonth = True if (d.day == calendar.monthrange(d.year, d.month)[1]) else False
return cls(
date=d,
full=d.__format__('%A, %B ') + str(d.day) + d.__format__(', %Y'),
year=d.year,
half=half,
quarter=quarter,
month=d.month,
dayOfYear=dayOfYear,
dayOfYearNoLeap=dayOfYearNoLeap,
dayOfQuarter=dayOfQuarter,
yearOfWeekYear=iso[0], # ISO calendar year
weekOfWeekYear=iso[1], # ISO calendar week number
dayOfMonth=d.day,
dayOfWeek=iso[2], # ISO calendar day of week
yearAndHalf='H' + str(half) + ' ' + str(d.year),
yearAndQuarter='Q' + str(quarter) + ' ' + str(d.year),
yearAndWeek='W' + str(iso[1]) + ' ' + str(iso[0]),
monthNameShort=d.__format__('%b'),
monthNameLong=d.__format__('%B'),
dayOfWeekNameShort=d.__format__('%a'),
dayOfWeekNameLong=d.__format__('%A'),
yearAndMonthNameShort=d.__format__('%b %Y'),
yearAndMonthNameLong=d.__format__('%B %Y'),
weekend='Weekend' if iso[2] >= 6 else 'Weekday',
isLastDayOfMonth=isLastDayOfMonth
)
__all__ = (
    'Date',
)
```
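A quick sketch of `Date.from_date`, showing a few of the derived calendar fields (the date is illustrative):

```python
import datetime

# illustrative: build the date-dimension row for 2015-12-31
row = Date.from_date(datetime.date(2015, 12, 31))

print(row.quarter)           # 4
print(row.yearAndQuarter)    # 'Q4 2015'
print(row.weekOfWeekYear)    # 53 (ISO week number)
print(row.isLastDayOfMonth)  # True
```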
#### File: datapro/framework/util.py
```python
from copy import deepcopy
import pytz
def dict_merge(master, merge):
"""Recursively merge dicts
(originally found at https://www.xormedia.com/recursively-merge-dictionaries-in-python/)
Args:
master (dict): base dictionary
merge (dict): dictionary with entries to be merged into the base dictionary
Returns:
dict: resulting dictionary from `merge` merged into `master`
"""
result = deepcopy(master)
    for k, v in merge.items():
        if k in result and isinstance(result[k], dict) and isinstance(v, dict):
            result[k] = dict_merge(result[k], v)
else:
result[k] = deepcopy(v)
return result
def to_utc(dt, tz, is_dst=False):
# TODO - cleanup docstring / is this the right spot for this function?
"""
Takes in a naive datetime and timezone, and returns a naive datetime, converted to UTC.
    Note that because of DST, the result can be off by one hour during the one repeated hour each year (if the given tz is DST-aware).
This is because, during roll-back, an hour occurs twice, and we don't know which hour (before or after the daylight savings switch) a naive datetime is talking about.
Naive: 12:30am (DST=True) -> 1:30am (DST=True) -> 1:30am (DST=False) -> 2:30am (DST=False)
UTC: 4:30am -> 5:30am -> 6:30am -> 7:30am
If we're guessing that is_dst is off, it means UTC 6am-7am happens twice (which is wrong!), just like Eastern 1am-2am
"""
return dt - pytz.timezone(tz).utcoffset(dt, is_dst=is_dst)
```
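Two small worked examples for the helpers above (values are illustrative). For `to_utc`, note the documented DST caveat: 1:30am on the US/Eastern fall-back day occurs twice, and `is_dst=False` resolves it to the post-switch (EST, UTC-5) reading:

```python
import datetime

merged = dict_merge({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3})
# -> {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}

# 2020-11-01 01:30 US/Eastern is ambiguous; is_dst=False picks EST (UTC-5)
utc_naive = to_utc(datetime.datetime(2020, 11, 1, 1, 30), 'US/Eastern')
# -> datetime.datetime(2020, 11, 1, 6, 30)
```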
|
{
"source": "jellowfish/backward",
"score": 4
}
|
#### File: backward/backward/__main__.py
```python
import sys
import readline  # noqa: F401 -- enables line editing and history for input()
from .backward import Backward
def main(file):
ctx = Backward()
if file:
try:
with open(file, "r") as fid:
for line in fid:
ans = ctx.evaluate(line)
if ans != [] and ans != [None]:
print(ans)
except Exception as e:
print(f"{type(e)}:", e)
else:
while True:
txt = ""
try:
txt = input("λ ")
except KeyboardInterrupt:
print()
exit(0)
try:
ans = ctx.evaluate(txt)
if ans != [] and ans != [None]:
print(ans)
except Exception as e:
print(f"{type(e)}:", e)
if __name__ == "__main__":
main(sys.argv[1] if len(sys.argv) > 1 else None)
```
#### File: backward/tests/test_read.py
```python
import unittest
from backward import Backward
# ---------------------------------------------------------------------
class TestRead(unittest.TestCase):
def test_empty(self):
ctx = Backward()
ans = ctx.read("")
self.assertEqual(str(ans), "[]")
def test_atom(self):
ctx = Backward()
ans = ctx.read("a")
self.assertEqual(str(ans), "[atom(a)]")
ans = ctx.read("a b c")
self.assertEqual(str(ans), "[atom(a), atom(b), atom(c)]")
def test_not(self):
ctx = Backward()
ans = ctx.read("!a")
self.assertEqual(str(ans), "[not(atom(a))]")
ans = ctx.read("!a !b")
self.assertEqual(str(ans), "[not(atom(a)), not(atom(b))]")
def test_and(self):
ctx = Backward()
ans = ctx.read("a & b")
self.assertEqual(str(ans), "[and((atom(a), atom(b)))]")
ans = ctx.read("!a & b")
self.assertEqual(str(ans), "[and((not(atom(a)), atom(b)))]")
ans = ctx.read("a & !b")
self.assertEqual(str(ans), "[and((atom(a), not(atom(b))))]")
ans = ctx.read("a & b & c")
self.assertEqual(str(ans), "[and((and((atom(a), atom(b))), atom(c)))]")
def test_or(self):
ctx = Backward()
ans = ctx.read("a | b")
self.assertEqual(str(ans), "[or((atom(a), atom(b)))]")
ans = ctx.read("!a | b")
self.assertEqual(str(ans), "[or((not(atom(a)), atom(b)))]")
ans = ctx.read("a | !b")
self.assertEqual(str(ans), "[or((atom(a), not(atom(b))))]")
ans = ctx.read("a | b & c")
self.assertEqual(str(ans), "[and((or((atom(a), atom(b))), atom(c)))]")
def test_xor(self):
ctx = Backward()
ans = ctx.read("a ^ b")
self.assertEqual(str(ans), "[xor((atom(a), atom(b)))]")
ans = ctx.read("!a ^ b")
self.assertEqual(str(ans), "[xor((not(atom(a)), atom(b)))]")
ans = ctx.read("a ^ !b")
self.assertEqual(str(ans), "[xor((atom(a), not(atom(b))))]")
ans = ctx.read("a ^ b | c")
self.assertEqual(str(ans), "[or((xor((atom(a), atom(b))), atom(c)))]")
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "Jelly6489/Stock-Proj",
"score": 2
}
|
#### File: status/model/status_dao.py
```python
from com_blacktensor.cop.cov.status.model.status_dto import CovidStatusDto
from com_blacktensor.ext.db import db, openSession
from sqlalchemy import func
# ============================================================
# ================== =====================
# ================== Modeling =====================
# ================== =====================
# ============================================================
# JPA Repository
class CovidStatusDao(CovidStatusDto):
@staticmethod
def save_data_bulk(datas):
Session = openSession()
session = Session()
session.bulk_insert_mappings(CovidStatusDto, datas.to_dict(orient='records'))
session.commit()
session.close()
@staticmethod
def count():
Session = openSession()
session = Session()
result = session.query(func.count(CovidStatusDto.no)).one()[0]
session.close()
return result
@classmethod
def find_all(self):
Session = openSession()
session = Session()
result = session.query(CovidStatusDto).all()
session.close()
return result
```
#### File: status/resources/status.py
```python
import logging
from flask_restful import Resource
from flask import request
from flask import jsonify
from com_blacktensor.cop.cov.status.model.status_dao import CovidStatusDao
# ============================================================
# ================== =====================
# ================== Resourcing =====================
# ================== =====================
# ============================================================
class CovidStatus(Resource):
def __init__(self):
self.dao = CovidStatusDao()
def get(self):
result = self.dao.find_all()
return jsonify([item.json for item in result])
```
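The routing for this resource is defined elsewhere in the project; a minimal sketch of how such a Flask-RESTful resource is typically registered (the URL path here is an assumption, not the project's actual route):

```python
from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)

# hypothetical path; the real route lives in the project's routing module
api.add_resource(CovidStatus, '/api/covid/status')
```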
#### File: emo/model/emotion_dao.py
```python
import requests
import pandas as pd
import codecs
import numpy as np
import re
from bs4 import BeautifulSoup
from konlpy.tag import Twitter
from collections import Counter
from com_blacktensor.ext.db import db, openSession
from sqlalchemy import func
import json
from sqlalchemy import Column, Integer, String, Date
from com_blacktensor.cop.emo.model.emotion_kdd import EmotionKdd
from com_blacktensor.cop.emo.model.emotion_dto import EmotionDto, StockNewsDto
from com_blacktensor.cop.emo.model.emotion_dfo import EmotionDfo
from com_blacktensor.cop.emo.model.emotion_kdd import keyword
# import time
# import multiprocessing
Session = openSession()
session = Session()
class EmotionDao(EmotionDto):
# @classmethod
# def bulk(cls, emotion_dfo):
# dfo = emotion_dfo.data_pro(0, keyword)
# print('--------Emotion----------')
# print(dfo.head())
# session.bulk_insert_mappings(cls, dfo.to_dict(orient="records"))
# session.commit()
# session.close()
@staticmethod
def bulk():
emotion_dfo = EmotionDfo()
dfo = emotion_dfo.data_pro(keyword)
session.bulk_insert_mappings(EmotionDto, dfo.to_dict(orient='records'))
session.commit()
session.close()
@staticmethod
def save(emotion):
session.add(emotion)
session.commit()
@classmethod
def update(cls, emotion):
# session.query(cls).filter(cls.keyword == emotion['keyword'])\
emotion = session.query(cls).filter(cls.keyword == keyword).first()\
.update({cls.no : emotion['no'],\
cls.positive:emotion['positive'],\
cls.pos_count:emotion['pos_count'],\
cls.negative:emotion['negative'],\
cls.neg_count:emotion['neg_count']})
session.commit()
@classmethod
def count(cls):
return session.query(func.count(cls.no)).one()
# @classmethod
# def find_insert(cls, emotion, keyword):
# session.query(cls).filter_by(cls.keyword == emotion['keyword']).last()\
# .insert({cls.no : emotion['no'],\
# cls.positive:emotion['positive'],\
# cls.pos_count:emotion['pos_count'],\
# cls.negative:emotion['negative'],\
# cls.neg_count:emotion['neg_count'],\
# cls.keyword:emotion['keyword']})
# if session.query(cls).filter(cls.keyword != keyword):
# emotion_dfo = EmotionDfo()
# dfo = emotion_dfo.data_pro(keyword)
# session.bulk_insert_mappings(EmotionDto, dfo.to_dict(orient='records'))
# session.commit()
# session.close()
# return session.query(cls).all()
@classmethod
def find_all(cls):
return session.query(cls).all()
# @classmethod
# def find_x(cls, keyword):
# # session.query(cls).filter(cls.keyword != keyword).last()
# # session.query(cls).filter(cls.keyword.like('keyword'))
# session.query(cls).filter(cls.keyword != keyword)
# session.close()
# return 0
# @classmethod
# def find_y(cls, keyword):
# # session.query(cls).filter(cls.keyword != keyword).last()
# # session.query(cls).filter(cls.keyword.like('keyword'))
# session.query(cls).filter(cls.keyword == keyword)
# session.close()
# return 0
# @classmethod
# def find_like(cls, keyword):
# # session.query(cls).filter(cls.keyword.like('%'+keyword+'%'))
# session.query(cls).filter(cls.keyword.like('%'+keyword+'%'))
# print(cls.keyword)
# session.close()
# return 0
# # @classmethod
# # def match(cls, keyword):
# @staticmethod
# def match(emotion, keyword):
# a = session.query(EmotionDto).filter(EmotionDto.keyword == keyword).all()
    # print('===========check 1==========')
    # print(a)
    # print('===========check 2==========')
    # print(EmotionDto.keyword)
    # print('===========check 3==========')
# print(keyword)
# session.commit()
# session.close()
# return 0
@classmethod
def find_update(cls, keyword):
emotion = session.query(cls).filter(cls.keyword == keyword).first()
# emotion.positive += 1
# emotion.pos_count += 1
# emotion.negative += 1
# emotion.neg_count += 1
# emotion.keyword += 1
# session.commit()
@classmethod
def find_by_keyword(cls, keyword):
print('==============find_by_keyword================')
a = cls.query.filter(cls.keyword != keyword).all()
b = cls.query.filter(cls.keyword == keyword).all()
if a:
# emotion = session.query(cls).filter(cls.keyword == keyword).first()
# emotion.positive += 1
# emotion.pos_count += 1
# emotion.negative += 1
# emotion.neg_count += 1
# session.commit()
return 0
elif b:
            print('------------duplicate--------------')
# emotion = session.query(cls).filter(cls.keyword == keyword).first()
# emotion.positive += 1
# emotion.pos_count += 1
# emotion.negative += 1
# emotion.neg_count += 1
# session.commit()
return 1
# print(a)
# print(type(a))
# print(keyword)
# print(type(keyword))
# print(df)
# print(type(df))
# for word in a:
# if keyword in word:
# print('ok')
# s.append(keyword)
# break;
# print(s)
# if any(keyword in word for word in a):
# print('ok')
        # print('===========s check 1==========')
# print(s)
# return cls.query.filter(EmotionDto.keyword == keyword).all()
@staticmethod
def test():
print(' TEST SUCCESS !!')
class StockNewsDao(StockNewsDto):
@staticmethod
def bulk():
emotion_dfo = EmotionDfo()
df = emotion_dfo.get_df(keyword)
session.bulk_insert_mappings(StockNewsDto, df.to_dict(orient="records"))
session.commit()
session.close()
@staticmethod
def save(emotion):
session.add(emotion)
session.commit()
@staticmethod
def count():
return session.query(func.count(StockNewsDto.no)).one()
@classmethod
def find_all(cls):
result = session.query(StockNewsDto).all()
session.close()
return result
# if __name__ == '__main__':
# EmotionDao.bulk()
```
#### File: emo/model/emotion_dto.py
```python
import csv
import json
import pandas as pd
from com_blacktensor.ext.db import db, openSession, engine
# from com_blacktensor.ext.routes import Resource
class EmotionDto(db.Model):
__tablename__ = 'emotion'
__table_args__={'mysql_collate' : 'utf8_general_ci'}
no : int = db.Column(db.Integer, primary_key = True, index = True)
positive : str = db.Column(db.String(10))
pos_count : int = db.Column(db.Integer)
negative : str = db.Column(db.String(10))
neg_count : int = db.Column(db.Integer)
keyword : str = db.Column(db.String(10))
# def __init__(self, no, positive, pos_count, negative, neg_count, keyword):
# self.no = no
# self.positive = positive
# self.pos_count = pos_count
# self.negative = negative
# self.neg_count = neg_count
# self.keyword = keyword
def __repr__(self):
return f'Emotion(no={self.no}, positive={self.positive}, pos_count={self.pos_count}, negative={self.negative},\
neg_count={self.neg_count}, keyword={self.keyword})'
def __str__(self):
return f'Emotion(no={self.no}, positive={self.positive}, pos_count={self.pos_count}, negative={self.negative},\
neg_count={self.neg_count}, keyword={self.keyword})'
@property
def json(self):
return {
'no' : self.no,
'positive' : self.positive,
'pos_count' : self.pos_count,
'negative' : self.negative,
'neg_count' : self.neg_count,
'keyword' : self.keyword
}
class StockNewsDto(db.Model):
__tablename__ = 'stock_news'
__table_args__={'mysql_collate' : 'utf8_general_ci'}
no : int = db.Column(db.Integer, primary_key = True, index = True)
title : str = db.Column(db.String(100))
keyword : str = db.Column(db.String(10))
# def __init__(self, no, positive, pos_count, negative, neg_count, keyword):
# self.no = no
# self.title = title
# self.keyword = keyword
def __repr__(self):
return f'Emotion(no={self.no}, title={self.title}, keyword={self.keyword})'
def __str__(self):
return f'Emotion(no={self.no}, title={self.title}, keyword={self.keyword})'
@property
def json(self):
return {
'no' : self.no,
'title' : self.title,
'keyword' : self.keyword
}
class EmotionVo:
no : int = 0
positive : str = ''
pos_count : int = 0
negative : str = ''
neg_count : int = 0
keyword : str = ''
class StockNewsVo:
no : int = 0
title : str = ''
keyword : str = ''
```
#### File: exc/model/exchange_kdd.py
```python
import pandas as pd
from pandas import DataFrame, Series
import requests as re
from bs4 import BeautifulSoup
import datetime as date
import time
my_folder = '/c/Users/Admin/VscProject/BlackTensor_Test'
class ExchangeKdd(object):
def market_index_kdd(self):
Data = DataFrame()
url_dict = {'미국 USD':'http://finance.naver.com/marketindex/exchangeDailyQuote.nhn?marketindexCd=FX_USDKRW',
'일본 JPY':'http://finance.naver.com/marketindex/exchangeDailyQuote.nhn?marketindexCd=FX_JPYKRW',
'유럽연합 EUR':'http://finance.naver.com/marketindex/exchangeDailyQuote.nhn?marketindexCd=FX_EURKRW',
'중국 CNY':'http://finance.naver.com/marketindex/exchangeDailyQuote.nhn?marketindexCd=FX_CNYKRW',
'WTI':'http://finance.naver.com/marketindex/worldDailyQuote.nhn?marketindexCd=OIL_CL&fdtc=2',
'국제 금':'http://finance.naver.com/marketindex/worldDailyQuote.nhn?marketindexCd=CMDT_GC&fdtc=2'}
for key in url_dict.keys():
date = []
value = []
for i in range(1,1000):
url = re.get(url_dict[key] + '&page=%s'%i)
url = url.content
html = BeautifulSoup(url,'html.parser')
tbody = html.find('tbody')
tr = tbody.find_all('tr')
                '''fetch pages until the last one'''
if len(tbody.text.strip()) > 3:
for r in tr:
temp_date = r.find('td',{'class':'date'}).text.replace('.','-').strip()
temp_value = r.find('td',{'class':'num'}).text.strip()
date.append(temp_date)
value.append(temp_value)
else:
temp = DataFrame(value, index = date, columns = [key])
Data = pd.merge(Data, temp, how='outer', left_index=True, right_index=True)
                    print(key + ' data collection complete')
time.sleep(10)
break
Data.to_csv('%s/market_index.csv'%(my_folder))
print(Data)
return Data
if __name__ == '__main__':
    K = ExchangeKdd().market_index_kdd()
```
#### File: exc/resource/exchange.py
```python
# com_blacktensor/cop/exc/resource/exchange.py
# from flask import request
# from flask_restful import Resource, reqparse
# from flask import jsonify
# from com_blacktensor.cop.exc.model.exchange_kdd import ExchangeKdd
# from com_blacktensor.cop.exc.model.exchange_kdd import ExchangeDao
# # ============================================================
# # ================== =====================
# # ================== Resourcing =====================
# # ================== =====================
# # ============================================================
# class Exchange(Resource):
# def __init__(self):
# self.dao = FinanceDao()
# def get(self):
# result = self.dao.find_all()
# return jsonify([item.json for item in result])
# # return jsonify(str(result))
```
#### File: fin/model/finance_dto.py
```python
import csv
import json
import pandas as pd
from com_blacktensor.ext.db import db, openSession, engine
# from com_blacktensor.ext.routes import Resource
class FinanceDto(db.Model):
__tablename__ = 'finance'
__table_args__={'mysql_collate' : 'utf8_general_ci'}
no : int = db.Column(db.Integer, primary_key = True, index = True)
name : str = db.Column(db.String(10))
f_2015_12 : float = db.Column(db.Float)
f_2016_12 : float = db.Column(db.Float)
f_2017_12 : float = db.Column(db.Float)
f_2018_12 : float = db.Column(db.Float)
f_2019_12 : float = db.Column(db.Float)
f_2020_12 : float = db.Column(db.Float)
f_2021_12 : float = db.Column(db.Float)
f_2022_12 : float = db.Column(db.Float)
keyword : str = db.Column(db.String(10))
# def __init__(self, no, name, f_2015_12, f_2016_12, f_2017_12, f_2018_12, f_2019_12, f_2020_12, f_2021_12, f_2022_12, keyword):
# self.no = no
# self.name = name
# self.f_2015_12 = f_2015_12
# self.f_2016_12 = f_2016_12
# self.f_2017_12 = f_2017_12
# self.f_2018_12 = f_2018_12
# self.f_2019_12 = f_2019_12
# self.f_2020_12 = f_2020_12
# self.f_2021_12 = f_2021_12
# self.f_2022_12 = f_2022_12
# self.keyword = keyword
def __repr__(self):
return f'Finance(no={self.no}, name={self.name}, f_2015_12={self.f_2015_12}, \
f_2016_12={self.f_2016_12}, f_2017_12={self.f_2017_12}, f_2018_12={self.f_2018_12}, \
f_2019_12={self.f_2019_12}, f_2020_12={self.f_2020_12}, f_2021_12={self.f_2021_12}, \
f_2022_12={self.f_2022_12}, keyword={self.keyword})'
def __str__(self):
return f'Finance(no={self.no}, name={self.name}, f_2015_12={self.f_2015_12}, \
f_2016_12={self.f_2016_12}, f_2017_12={self.f_2017_12}, f_2018_12={self.f_2018_12}, \
f_2019_12={self.f_2019_12}, f_2020_12={self.f_2020_12}, f_2021_12={self.f_2021_12}, \
f_2022_12={self.f_2022_12}, keyword={self.keyword})'
@property
def json(self):
return {
'no' : self.no,
'name' : self.name,
'f_2015_12' : self.f_2015_12,
'f_2016_12' : self.f_2016_12,
'f_2017_12' : self.f_2017_12,
'f_2018_12' : self.f_2018_12,
'f_2019_12' : self.f_2019_12,
'f_2020_12' : self.f_2020_12,
'f_2021_12' : self.f_2021_12,
'f_2022_12' : self.f_2022_12,
'keyword' : self.keyword
}
class FinanceVo:
no : int = 0
name : str = ''
f_2015_12 : float = 0.0
f_2016_12 : float = 0.0
f_2017_12 : float = 0.0
f_2018_12 : float = 0.0
f_2019_12 : float = 0.0
f_2020_12 : float = 0.0
f_2021_12 : float = 0.0
f_2022_12 : float = 0.0
keyword : str = ''
```
#### File: covid/resources/covid_news.py
```python
from flask_restful import Resource
from flask import jsonify
from flask import request
from com_blacktensor.cop.news.covid.model.covid_news_dao import CovidNewsDao, CovidExtractionWordDao
from com_blacktensor.cop.news.covid.model.covid_news_df import CovidNewsDf
from com_blacktensor.cop.news.covid.model.covid_news_kdd import CovidNewsKDD
from com_blacktensor.util.checker import Checker
from com_blacktensor.util.file_handler import FileHandler as handler
import threading
import time
# ============================================================
# ================== =====================
# ================== Resourcing =====================
# ================== =====================
# ============================================================
class CovideNews(Resource):
def __init__(self):
self.news_dao = CovidNewsDao()
self.word_dao = CovidExtractionWordDao()
self.df = CovidNewsDf()
def get(self):
params = request.get_json()
keyword = params['keyword']
if keyword is not None:
count = self.news_dao.count()
if count == 0:
crawer = CovidNewsKDD()
print('get urls start')
start_time = time.time()
urls = crawer.get_naver_news_urls(keyword)
print(f'get urls end. processing time : {time.time() - start_time}s')
if not Checker.check_folder_path('./csv'):
handler.crete_folder('./csv')
handler.save_to_csv('./csv/result_Covid19_urls.csv', urls, ['urls'], 'utf-8-sig')
# url_df = handler.load_to_csv('./csv/result_Covid19_urls.csv', 'utf-8-sig')
# urls = url_df['urls'].tolist()
print('get contents from urls start')
start_time = time.time()
result_list = []
thread_count = 5
thread_list = []
div_count = int(len(urls) / thread_count) # 600
for idx in range(0, thread_count):
start_idx = idx * div_count
end_idx = (start_idx + div_count)
div_url = urls[int(start_idx):int(end_idx)]
thread = threading.Thread(target=crawer.get_contents_from_naver_urls, args=(div_url, result_list))
thread_list.append(thread)
thread.start()
for thread in thread_list:
thread.join()
print(f'get contents from urls end. processing time : {time.time() - start_time}s')
if not Checker.check_folder_path('./csv'):
handler.crete_folder('./csv')
handler.save_to_csv('./csv/result_covid19_news.csv', result_list, ['time','contents'], 'utf-8-sig')
df = self.df.get_df_news(result_list)
# df = handler.load_to_csv('./csv/result_Covid19_News.csv', 'utf-8-sig')
# print(df)
# print(df.isnull().values.any())
# counter = df['contents'].isnull().sum()
# print(f'contents is non : {counter}')
# counter = df['time'].isnull().sum()
# print(f'time is non : {counter}')
self.news_dao.save_data_bulk(df)
wordcount = self.word_dao.count()
if wordcount == 0:
df = self.df.get_df_news_word('./csv/result_covid19_news.csv', 'utf-8-sig')
# print(df)
self.word_dao.save_data_bulk(df)
result = self.word_dao.find_all()
return jsonify([item.json for item in result])
# result = self.news_dao.find_all()
# return jsonify(json_list=[item.json for item in result])
```
#### File: economy/model/economy_dao.py
```python
from com_blacktensor.ext.db import db, openSession
from com_blacktensor.cop.news.economy.model.economy_dto import EconomyNewsDto, EconomyExtractionWordDto
from sqlalchemy import func
class EconomyExtractionWordDao(EconomyExtractionWordDto):
@staticmethod
def save_data_bulk(datas):
Session = openSession()
session = Session()
session.bulk_insert_mappings(EconomyExtractionWordDto, datas.to_dict(orient='records'))
session.commit()
session.close()
@staticmethod
def count():
Session = openSession()
session = Session()
result = session.query(func.count(EconomyExtractionWordDto.no)).one()[0]
session.close()
return result
@classmethod
def find_all(self):
Session = openSession()
session = Session()
result = session.query(EconomyExtractionWordDto).all()
session.close()
return result
class EconomyNewsDao(EconomyNewsDto):
@staticmethod
def save_data_bulk(datas):
Session = openSession()
session = Session()
session.bulk_insert_mappings(EconomyNewsDto, datas.to_dict(orient='records'))
session.commit()
session.close()
@staticmethod
def count():
Session = openSession()
session = Session()
result = session.query(func.count(EconomyNewsDto.no)).one()[0]
session.close()
return result
@classmethod
def find_all(self):
Session = openSession()
session = Session()
result = session.query(EconomyNewsDto).all()
session.close()
return result
```
#### File: economy/model/economy_kdd.py
```python
from selenium import webdriver
import threading
import requests
import datetime
from bs4 import BeautifulSoup as bs
from com_blacktensor.util.summary_news import SummaryUtil as summary
# ============================================================
# ================== =====================
# ================== KDD =====================
# ================== =====================
# ============================================================
class EconomyNewsKdd(object):
def __init__(self):
self.lock = threading.Lock()
def get_economy_news_urls(self):
options = webdriver.ChromeOptions()
options.add_argument("headless")
options.add_argument("--log-level=3")
driver = webdriver.Chrome('./com_blackTensor/resources/driver/chromedriver.exe', options=options)
total_news_count = 0
end_news_count = 240
start_page = 1
end_page = -1
if end_news_count % 24 == 0:
end_page = int(end_news_count / 24)
else:
end_page = int((end_news_count / 24) + 1)
crawNewsList = []
driver.get(f'https://www.mk.co.kr/news/economy/economic-policy/?page={start_page}')
driver.implicitly_wait(2)
for _ in range(0, end_page + 1):
news_list = driver.find_elements_by_css_selector("div.list_area>dl")
for news in news_list:
a = news.find_element_by_css_selector('dt.tit>a')
href = a.get_attribute('href')
crawNewsList.append(href)
total_news_count += 1
if total_news_count == end_news_count:
break
if total_news_count == end_news_count:
break
else:
start_page += 1
driver.get(f'https://www.mk.co.kr/news/economy/economic-policy/?page={start_page}')
driver.implicitly_wait(2)
return crawNewsList
def get_contents_from_economy_urls(self, urls, result_list):
# print(f'thread Name : {threading.currentThread().getName()}, urls len : {len(urls)}')
options = webdriver.ChromeOptions()
options.add_argument("headless")
options.add_argument("--log-level=3")
driver = webdriver.Chrome('./com_blackTensor/resources/driver/chromedriver.exe', options=options)
for url in urls:
driver.get(url)
body = driver.find_element_by_css_selector("div.art_txt")
total_text = body.text
remove_idx = total_text.rfind('기자')
total_text = total_text[:remove_idx]
remove_idx = total_text.rfind('.')
total_text = total_text[:remove_idx]
create_time_text = driver.find_elements_by_css_selector("div.news_title_author>ul>li")[-2].text
create_time_text = create_time_text.replace('입력 : ', '')
create_time_text = create_time_text[:create_time_text.find(' ')]
with self.lock:
result_list.append({"time":create_time_text, "contents": summary.Summry_News(total_text.replace('\n', ''))})
driver.quit()
```
#### File: economy/resources/economy_news.py
```python
from flask_restful import Resource
from flask import jsonify
from com_blacktensor.cop.news.economy.model.economy_kdd import EconomyNewsKdd
from com_blacktensor.cop.news.economy.model.economy_df import EconomyNewsDf
from com_blacktensor.cop.news.economy.model.economy_dao import EconomyNewsDao, EconomyExtractionWordDao
from com_blacktensor.util.checker import Checker
from com_blacktensor.util.file_handler import FileHandler as handler
import threading
class EconomyNews(Resource):
def __init__(self):
self.news_dao = EconomyNewsDao()
self.word_dao = EconomyExtractionWordDao()
self.df = EconomyNewsDf()
def get(self):
econmoy_news_count = self.news_dao.count()
if econmoy_news_count == 0:
kdd = EconomyNewsKdd()
urls = kdd.get_economy_news_urls()
# print(datas)
if not Checker.check_folder_path('./csv'):
handler.crete_folder('./csv')
handler.save_to_csv('./csv/result_economy_urls.csv', urls, ['urls'], 'utf-8-sig')
result_list = []
thread_count = 6
thread_list = []
div_count = int(len(urls) / thread_count) # 600
for idx in range(0, thread_count):
start_idx = idx * div_count
end_idx = (start_idx + div_count)
div_url = urls[int(start_idx):int(end_idx)]
thread = threading.Thread(target=kdd.get_contents_from_economy_urls, args=(div_url, result_list))
thread_list.append(thread)
thread.start()
for thread in thread_list:
thread.join()
if not Checker.check_folder_path('./csv'):
handler.crete_folder('./csv')
handler.save_to_csv('./csv/result_economy_news.csv', result_list, ['time','contents'], 'utf-8-sig')
df = self.df.get_df_news(result_list)
self.news_dao.save_data_bulk(df)
econmoy_word_count = self.word_dao.count()
if econmoy_word_count == 0:
df = self.df.get_df_news_word('./csv/result_economy_news.csv', 'utf-8-sig')
self.word_dao.save_data_bulk(df)
result = self.word_dao.find_all()
return jsonify([item.json for item in result])
```
#### File: sto/model/stock_dao.py
```python
import csv
import pandas as pd
# # from sqlalchemy import create_engine
from com_blacktensor.ext.db import db, openSession, engine
from sqlalchemy import func
# from com_blacktensor.ext.routes import Resource
from com_blacktensor.cop.sto.model.stock_kdd import StockKdd
from com_blacktensor.cop.sto.model.stock_dto import StockDto
from com_blacktensor.cop.sto.model.stock_dfo import StockDfo
from com_blacktensor.cop.emo.model.emotion_kdd import keyword
Session = openSession()
session = Session()
class StockDao(StockDto):
@staticmethod
def bulk():
stock_dfo = StockDfo()
# dfo = stock_dfo.get_df(keyword)
dfo = stock_dfo.get_csv(keyword)
session.bulk_insert_mappings(StockDto, dfo.to_dict(orient='records'))
session.commit()
session.close()
@staticmethod
def save(emotion):
session.add(emotion)
session.commit()
@classmethod
def count(cls):
return session.query(func.count(cls.date)).one()
@classmethod
def find_all(cls):
return session.query(cls).all()
@staticmethod
def test():
print(' TEST SUCCESS !!')
```
#### File: com_blacktensor/ext/db.py
```python
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
db = SQLAlchemy()
config = {
'user' : 'blackTensor',
'password' : '<PASSWORD>',
'host': '127.0.0.1',
'port' : '3306',
'database' : 'blacktensor'
}
charset = {'utf8':'utf8'}
url = f"mysql+mysqlconnector://{config['user']}:{config['password']}@{config['host']}:{config['port']}/{config['database']}?charset=utf8"
Base = declarative_base()
engine = create_engine(url)
def openSession():
return sessionmaker(bind=engine)
```
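`openSession` returns a `sessionmaker` factory rather than a live session, which is why the DAO modules call it twice, once for the factory and once for the session. A short sketch of that pattern:

```python
Session = openSession()   # sessionmaker bound to the module-level engine
session = Session()       # actual SQLAlchemy session
try:
    # ... queries / bulk_insert_mappings ...
    session.commit()
finally:
    session.close()
```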
#### File: Jelly6489/Stock-Proj/run.py
```python
from flask import Flask, render_template, request
from flask_restful import Resource, Api
from main import app
from com_blacktensor.cop.emo.resource.emotion import Emotion
app.run(host='192.168.0.10', port='8080', debug=True)
# app.run(host='127.0.0.1', port='8080', debug=True)
# '''
# app = Flask(__name__)
# api = Api(app)
# @app.route('/')
# def main_get(num=None):
# return render_template(num=num)
# if __name__ == "__main__":
# app.run(host='192.168.0.10', port='8080', debug=True)
# return render_template('####.html', num=num)
# @app.route('/api/emotion', method = ['GET', 'POST'])
# def stock_name():
# if request.method == 'GET':
# keyword = request.args.get('keyword')
# print(request.form)
# # return render_template('.jsx', keyword = keyword)
# return render_template(keyword = keyword)
# # return 0
# if __name__ == "__main__":
# app.run(host='192.168.0.10', port='8080', debug=True)
# '''
'''
@app.route('/api/emotion', method = ['POST', 'GET'])
def stock_name(num=None):
if request.method == 'POST':
# temp = request.form['num']
pass
elif request.method == 'GET':
temp = request.args.get('num')
# temp = str(temp)
temp1 = request.args.get('keyword')
print('Ok!')
# return render_template('####.html', num=temp, keyword=temp1)
if __name__ == '__main__':
app.run(host='192.168.0.10', port='8080', debug=True)
'''
'''
app = Flask(__name__)
api = Api(app)
class Rest(Resource):
def get(self):
return {'rest': '한국 !'}
# return Emotion()
def post(self):
return {'rest': 'post success !'}
api.add_resource(Rest, '/api')
if __name__ == '__main__':
app.run(host='192.168.0.10', port='8080', debug=True)
'''
'''
app = Flask(__name__)
api = Api(app)
@app.route('/test')
def test():
if request.method == 'Post':
return {'test' : 'test Success!'}
def get():
return {'get' : 'get Success!'}
def post():
return {'post' : 'post Success!'}
if __name__ == '__main__':
app.run(debug=True, host='127.0.0.1', port=8080)
'''
'''
app = Flask(__name__)
@app.route('/')
def index():
return 'Hello world!'
if __name__ == 'main':
app.run(host='192.168.0.10', port='8080', debug=True)
'''
```
|
{
"source": "jelly-ape/apicms",
"score": 2
}
|
#### File: apicms/core/group.py
```python
class Group(object):
def __init__(self, name):
self.name = name
```
|
{
"source": "jelly-ape/dts_server",
"score": 2
}
|
#### File: api/handlers/ranking_handler.py
```python
import pymongo
import api.handlers.base_handler
import api.modules.photo_manager
import api.libs.log
class RankingHandler(api.handlers.base_handler.BaseHandler):
def __init__(self, *args, **kwargs):
super(RankingHandler, self).__init__(*args, **kwargs)
self._logger = api.libs.log.get_logger('ranking')
def __get_ranking(self):
photo_mgr = api.modules.photo_manager.PhotoManager()
condition = self._for_audit()
photos = photo_mgr.get(condition).sort('likes', pymongo.DESCENDING)
photos = photos.skip(self._params['skip']).limit(self._params['max'])
self._rets['photos'] = []
for photo in photos:
url = self._make_url(photo['url'])
photo_id = str(photo['_id'])
likes = int(photo.get('likes', 0))
if likes > 0:
self._rets['photos'].append({
'id': photo_id,
'url': url,
'likes': likes,
})
def process(self):
self.__get_ranking()
```
#### File: modules/haha/random_manager.py
```python
import bson.objectid
import random
import pymongo
import api.libs.utils
import api.libs.database
@api.libs.utils.singleton
class RandomManager(object):
"""id 是固定的, 只有 id 和 photo_id 两个维度
"""
def __init__(self):
self.__store = self.__init_store()
def __init_store(self):
"""初始化储存
"""
store = api.libs.database.Database().photos
store.create_index(
[('rand', pymongo.ASCENDING)],
unique=True,
)
return store
def update(self):
"""更新所有照片
"""
# 获取所有照片
```
#### File: dts_server/src/import_data.py
```python
import pymongo
import sys
import json
import random
import api.modules.photo_manager
import api.modules.album_manager
def main():
photo_mgr = api.modules.photo_manager.PhotoManager()
album_mgr = api.modules.album_manager.AlbumManager()
for line in sys.stdin:
line = line.strip()
js = json.loads(line)
album = {
'press': js['press'],
'name': js['album'],
'cover': js['cover'],
'models': js['models'],
}
album_mgr.insert(album)
photo = {
'press': js['press'],
'album_name': js['album'],
'url': js['url'],
'rand': random.random(),
'likes': 0,
}
photo_mgr.insert(photo)
if __name__ == '__main__':
main()
```
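The script reads JSON lines from stdin, one record per photo. Below is a hypothetical smoke test; the field values are invented, and it assumes the script is importable as a module with the `api.modules` managers backed by a running MongoDB.
```python
# Hypothetical smoke test for import_data.main(); all field values are invented.
import io
import json
import sys

import import_data

record = {
    'press': 'ExamplePress',
    'album': 'Example Album',
    'cover': 'http://example.com/cover.jpg',
    'models': ['Model A'],
    'url': 'http://example.com/photo1.jpg',
}
sys.stdin = io.StringIO(json.dumps(record) + '\n')  # feed a single JSON line
import_data.main()
```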
|
{
"source": "jelly/arch-repo-sec-tools",
"score": 3
}
|
#### File: jelly/arch-repo-sec-tools/checksec.py
```python
from re import search
from elftools.elf.dynamic import DynamicSection
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
STACK_CHK = set(["__stack_chk_fail", "__stack_smash_handler"])
class Elf:
def __init__(self, fileobj):
self.fileobj = fileobj
self._elffile = None
@property
def elffile(self):
if not self._elffile:
self._elffile = ELFFile(self.fileobj)
return self._elffile
def _file_has_magic(self, fileobj, magic_bytes):
length = len(magic_bytes)
magic = fileobj.read(length)
fileobj.seek(0)
return magic == magic_bytes
def is_elf(self):
"Take file object, peek at the magic bytes to check if ELF file."
return self._file_has_magic(self.fileobj, b"\x7fELF")
def dynamic_tags(self, key="DT_RPATH"):
for section in self.elffile.iter_sections():
if not isinstance(section, DynamicSection):
continue
for tag in section.iter_tags():
if tag.entry.d_tag == key:
return True
return False
def is_relro(self):
if self.elffile.num_segments() == 0:
return False
have_relro = False
for segment in self.elffile.iter_segments():
if search("GNU_RELRO", str(segment['p_type'])):
have_relro = True
break
if self.dynamic_tags("DT_BIND_NOW") and have_relro:
return True
else:
# Partial
return False
return False
def canary(self):
for section in self.elffile.iter_sections():
if not isinstance(section, SymbolTableSection):
continue
if section['sh_entsize'] == 0:
continue
for _, symbol in enumerate(section.iter_symbols()):
if symbol.name in STACK_CHK:
return True
return False
def pie(self):
header = self.elffile.header
if self.dynamic_tags("EXEC"):
return False
if "ET_DYN" in header['e_type']:
if self.dynamic_tags("DT_DEBUG"):
return True
else:
# DSO is PIE
return True
return False
```
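A minimal usage sketch for the `Elf` helper above, assuming pyelftools is installed; the binary path is an arbitrary example, not something the module prescribes.
```python
# Hedged usage sketch, not part of the repo: inspect one local ELF binary.
from checksec import Elf

with open('/usr/bin/ls', 'rb') as fileobj:  # any local ELF file works; the path is an example
    elf = Elf(fileobj)
    if elf.is_elf():
        print('RELRO :', elf.is_relro())
        print('Canary:', elf.canary())
        print('PIE   :', elf.pie())
```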
#### File: jelly/arch-repo-sec-tools/repo_sec_checker.py
```python
from argparse import ArgumentParser
from functools import partial
from glob import glob
from io import BytesIO
from multiprocessing import Pool
from multiprocessing import cpu_count
from os.path import join
from os.path import basename
from libarchive import file_reader
from tabulate import tabulate
from checksec import Elf
ARCHS = ['x86_64']
PKG_EXT = '.tar.xz'
DEFAULT_SOURCE_DIR = '/srv/ftp'
SOURCE_WHITELIST = ['core', 'extra', 'community', 'multilib']
VALID_DIRS = ['usr/bin/']
class Result:
# TODO: use slots, measure
def __init__(self, filename):
self.filename = filename
self.nopie = []
self.norelro = []
self.nocanary = []
@property
def not_secure(self):
return self.nopie or self.norelro or self.nocanary
@property
def name(self):
return basename(self.filename)
@property
def table(self):
return [self.name, not self.norelro, not self.nocanary, not self.nopie]
@property
def results(self):
return {'relro': self.norelro, 'canary': self.nocanary,
'pie': self.nopie}
def __repr__(self):
return f"Result({self.name})"
def read_file(full, filename):
res = Result(filename)
with file_reader(filename) as pkg:
for entry in pkg:
# break if any of the files are not secure, speeding up scanning
if not full and res.not_secure:
break
if not entry.isfile:
continue
if not any(entry.name.startswith(d) for d in VALID_DIRS):
continue
fp = BytesIO(b''.join(entry.get_blocks()))
elf = Elf(fp)
if not elf.is_elf():
continue
if not elf.pie():
res.nopie.append(entry.name)
if not elf.is_relro():
res.norelro.append(entry.name)
if not elf.canary():
res.nocanary.append(entry.name)
return res
def main(full, verbose, repodir, repos, processes=cpu_count() * 2):
tasks = []
for subdir in repos:
for arch in ARCHS:
directory = join(repodir, subdir, 'os', arch)
for filename in glob(join(directory, f'*{PKG_EXT}')):
tasks.append((filename))
with Pool(processes=processes) as pool:
func = partial(read_file, verbose)
results = pool.map(func, tasks)
table = [result.table for result in results if result.not_secure]
print(tabulate(table, headers=["Name", "FULL RELRO", "CANARY", "PIE"]))
if verbose:
print()
print('Verbose\n-------\n')
for result in results:
if not result.not_secure:
continue
for hardening, files in result.results.items():
for f in files:
print(f'Missing {hardening} for {f}')
if __name__ == '__main__':
    parser = ArgumentParser(description='Repo Sec Checker')
parser.add_argument('--repodir', default=DEFAULT_SOURCE_DIR, help=f'root directory of the repo (default: {DEFAULT_SOURCE_DIR})')
    parser.add_argument('--repos', nargs='+', type=str, default=SOURCE_WHITELIST, help=f'default repos to scan (default: {SOURCE_WHITELIST})')
parser.add_argument('--processes', type=int, default=cpu_count() * 2, help=f'number of parallel processes (default: {cpu_count()*2})')
    parser.add_argument('--verbose', action='store_true', help='output the binaries which lack a hardening feature')
parser.add_argument('--full', action='store_true', help=f'Scan every binary instead of stopping when one binary is not fully hardened')
args = parser.parse_args()
main(args.full, args.verbose, args.repodir, args.repos, args.processes)
```
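Besides the argparse entry point, `main()` can be called directly. The sketch below is illustrative only; the repo directory and repo list are assumptions about a local Arch mirror layout.
```python
# Illustrative direct call, bypassing argparse; repodir and repos are assumptions
# about a local mirror layout, and four worker processes is an arbitrary choice.
main(full=False, verbose=True, repodir='/srv/ftp', repos=['core'], processes=4)
```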
|
{
"source": "jelly/arch-repro-test",
"score": 3
}
|
#### File: jelly/arch-repro-test/arch-repro-test.py
```python
import tarfile
import os
import subprocess
import sys
from datetime import datetime
REPRO_DIR = 'repro'
SERVER_FORMAT = 'Server = https://archive.archlinux.org/repos/{}/{}/{}/$repo/os/$arch\n'
def parse_installed(data):
data = data.decode('utf-8')
packages = []
for line in data.split('\n'):
if line.startswith('installed'):
pkg = line.split(' = ')[1]
packages.append(pkg)
return packages
def extract_pkgbuild_hash(data):
data = data.decode('utf-8')
    for line in data.split('\n'):
        if line.startswith('pkgbuild_sha256sum'):
            return line.split(' = ')[1].strip()
def extract_builddate(data):
for line in data.split(b'\n'):
if line.startswith(b'builddate'):
return line.split(b' = ')[1]
def main(filename):
    tar = tarfile.open(filename)
    buildinfo = None
    builddate = None
for member in tar.getmembers():
if member.name == '.BUILDINFO':
entry = tar.extractfile(member)
buildinfo = entry.read()
if member.name == '.PKGINFO':
entry = tar.extractfile(member)
builddate_str = extract_builddate(entry.read())
builddate = datetime.fromtimestamp(int(builddate_str))
    if not builddate:
        print('No builddate, cannot reproduce')
        return
print("builddate: {}".format(builddate))
print("month: {}, day: {}".format(builddate.month, builddate.day))
with open('mirrorlist', 'w') as mirrorlist:
mirrorlist.write(SERVER_FORMAT.format(builddate.year, builddate.month, builddate.day))
    if not buildinfo:
        print('No .BUILDINFO found in {}'.format(filename))
        return
    # Parse buildinfo
    packages = parse_installed(buildinfo)
#pkgbuild_sha256sum = extract_pkgbuild_hash(buildinfo)
#pkgname = ''.join(filename.split('-')[:-3])
#os.system('asp -a x86_64 checkout {}'.format(pkgname))
    # x = 'curl/repos/core-x86_64/PKGBUILD'
#print(packages)
# Handle stripping pkgver-pkgrel
pkgnames = ['-'.join(pkg.split('-')[:-2]) for pkg in packages]
install = ' '.join(pkgnames)
# pyalpm?!!!
# Pacstrap
if os.path.exists(REPRO_DIR):
os.system('sudo rm -rf {}'.format(REPRO_DIR))
os.makedirs(REPRO_DIR)
# Download packages in parallel
# Fetch them in parallel https://wiki.archlinux.org/index.php/Arch_Linux_Archive#.2Fpackages
print('Install build chroot')
os.system('sudo mkarchroot -C pacman.conf {}/root {}'.format(REPRO_DIR, install))
print('Verify if all packages are installed')
# Verify installed version matches
p = subprocess.Popen('pacman -r {}/root -Q'.format(REPRO_DIR), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
lines = p.stdout.readlines()
if len(lines) != len(packages):
print('Missing or too many installed packages installed in chroot')
for line in lines:
line = line.strip()
line = line.decode('utf-8')
# Replace space with - to match the BUILDINFO format
line = line.replace(' ', '-')
if line not in packages:
for pkg in packages:
                pkgname = '-'.join(pkg.split('-')[:-2])
if line.startswith(pkgname):
break
print('Wrong installed package: {}, required: {}'.format(line, pkgname))
retval = p.wait()
# ASP x86_64
# Build!
if __name__ == "__main__":
if len(sys.argv) < 2:
print('missing argument file.tar.xz')
sys.exit(1)
main(sys.argv[1])
```
|
{
"source": "jellyb0y/easy-requirements",
"score": 2
}
|
#### File: easy-requirements/ezdeps/build.py
```python
import setuptools
import sys
import os
from shutil import copyfile
from .manager import Manager
from .utils.constants import PACKAGE_FILE, CUR_DIR
from .utils.packages import get_pip_requirements
def build(manager: Manager):
sys.argv = [sys.argv[0], 'sdist', 'bdist_wheel']
temp_file = f'{CUR_DIR}/{manager.name}/.ezdeps.json'
copyfile(PACKAGE_FILE, temp_file)
setuptools.setup(
name=manager.name,
version=manager.version,
author='name' in manager.author and manager.author['name'],
author_email='email' in manager.author and manager.author['email'],
description=manager.description,
url=manager.url,
scripts=manager.scripts,
long_description=manager.get_documentation(),
long_description_content_type='text/markdown',
packages=setuptools.find_packages(),
install_requires=get_pip_requirements(manager.get_requirements('default')),
include_package_data=manager.include_package_data,
classifiers=manager.classifiers,
python_requires=manager.python_requires
)
os.remove(temp_file)
```
|
{
"source": "jellybean4/yosaipy2",
"score": 2
}
|
#### File: core/concurrency/concurrency.py
```python
import threading
class StoppableScheduledExecutor(threading.Thread):
def __init__(self, my_func, interval):
super(StoppableScheduledExecutor, self).__init__()
self.event = threading.Event()
self.my_func = my_func
self.interval = interval # in seconds
def stop(self):
self.event.set()
self.join()
def run(self):
while True:
self.my_func()
if self.event.wait(self.interval):
return
# yosai.core.omits ThreadContext because it is replaced by the standard library
# threading.local() object
```
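A small usage sketch for `StoppableScheduledExecutor`; the callback and the timings are arbitrary examples.
```python
# Illustrative only: print a heartbeat roughly every 2 seconds, then stop the worker.
import time

def heartbeat():
    print('still alive')

executor = StoppableScheduledExecutor(heartbeat, 2)
executor.start()
time.sleep(5)    # lets the callback fire a couple of times
executor.stop()  # sets the event and joins the thread
```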
#### File: core/conf/yosaisettings.py
```python
from pathlib import Path
import yaml
import os
empty = object()
class LazySettings(object):
"""
LazyConfig proxies the custom-else-default settings configuration process.
Required settings that are not user-defined (custom) will default to those
specified in default settings.
"""
def __init__(self, env_var=None, file_path=None):
self._wrapped = empty
if not env_var and not file_path:
            raise TypeError('Must specify either an env_var or a file_path.')
self.__dict__["env_var"] = env_var
self.__dict__["file_path"] = file_path
def __getattr__(self, name):
if self._wrapped is empty:
self._setup()
return getattr(self._wrapped, name, None)
def __setattr__(self, name, value):
if name == "_wrapped":
# Assign to __dict__ to avoid infinite __setattr__ loops.
self.__dict__["_wrapped"] = value
else:
if self._wrapped is empty:
self._setup()
setattr(self._wrapped, name, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is empty:
self._setup()
delattr(self._wrapped, name)
@property
def configured(self):
return self._wrapped is not empty
def _setup(self):
"""
Load the settings module referenced by env_var. This environment-
defined configuration process is called during the settings
configuration process.
"""
envvar = self.__dict__['env_var']
if envvar:
settings_file = os.environ.get(envvar)
else:
settings_file = self.__dict__['file_path']
if not settings_file:
msg = ("Requested settings, but none can be obtained for the envvar."
"Since no config filepath can be obtained, a default config "
"will be used.")
raise OSError(msg)
self._wrapped = Settings(settings_file)
class Settings:
def __init__(self, settings_filepath):
self.load_config(settings_filepath)
@staticmethod
def get_config(filepath):
if not os.path.exists(filepath):
raise OSError('could not locate: ' + str(filepath))
with Path(filepath).open() as stream:
            config = yaml.safe_load(stream)
return config
def load_config(self, filepath):
try:
config = self.get_config(filepath)
tempdict = {}
tempdict.update(self.__dict__)
tempdict.update(config)
self.__dict__ = tempdict
except (TypeError, ValueError) as exc:
raise exc.__class__('Settings failed to load attrs')
```
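A hypothetical way to exercise `LazySettings`; the file path and the YAML keys below are assumptions for illustration, not shipped defaults.
```python
# Hypothetical usage; /tmp/yosai_settings.yaml could contain, for example:
#   SESSION_CONFIG:
#     session_timeout:
#       absolute_timeout: 1800
settings = LazySettings(file_path='/tmp/yosai_settings.yaml')
print(settings.SESSION_CONFIG)  # first attribute access triggers _setup() and loads the file
```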
#### File: core/event/abcs.py
```python
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class EventBus(object):
"""
An event bus can publish events to event subscribers as well as provide a
mechanism for registering and unregistering event subscribers.
An event bus enables a publish/subscribe paradigm within Yosai -- components
can publish or consume events they find relevant without needing to be
tightly coupled to other components. This affords great flexibility within
Yosai by promoting loose coupling and high cohesion between components and
a much safer pluggable architecture.
Sending Events
-----------------
If a component wishes to publish events to other components:::
event_bus.send_message(topic, *kwargs)
The event bus dispatches the event 'message' to components that wish to receive
events of that type (known as subscribers).
Receiving Events
------------------
A component can receive events of interest by doing the following.
For each event topic you wish to consume, create a callback method
that will be called when an specific type of event is communicated across
the event bus. Register the callback with the event_bus:::
event_bus.subscribe(topic, callback)
"""
@abstractmethod
def send_message(self, topic_name, **kwargs):
pass
@abstractmethod
def subscribe(self, _callable, topic_name):
pass
@abstractmethod
def unsubscribe(self, listener, topic_name):
pass
```
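The ABC above only fixes the publish/subscribe interface. A minimal in-memory implementation is sketched below for illustration; yosaipy2's real event bus is not necessarily built this way, and the topic name used in the example is arbitrary.
```python
# Minimal in-memory EventBus sketch (illustrative only).
from collections import defaultdict

class SimpleEventBus(EventBus):
    def __init__(self):
        self._subscribers = defaultdict(list)

    def send_message(self, topic_name, **kwargs):
        # deliver the event to every callback registered for this topic
        for callback in self._subscribers[topic_name]:
            callback(topic_name, **kwargs)

    def subscribe(self, _callable, topic_name):
        self._subscribers[topic_name].append(_callable)

    def unsubscribe(self, listener, topic_name):
        self._subscribers[topic_name].remove(listener)

bus = SimpleEventBus()
bus.subscribe(lambda topic, **kw: print(topic, kw), 'SESSION.START')
bus.send_message('SESSION.START', session_id='abc123')  # topic and payload are arbitrary examples
```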
#### File: core/logging/slogging.py
```python
from logging import config
from yosaipy2.core.logging.formatters import (
JSONFormatter,
)
def load_logconfig():
default_logging = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'print_format': {
'format': "%(asctime)s\t%(levelname)s:%(name)s\t%(message)s",
},
'json_format': {
'()': JSONFormatter
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
'formatter': 'print_format'},
'debug_file_handler': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'DEBUG',
'filename': '/var/log/yosai/debug.log',
'formatter': 'json_format',
'maxBytes': 10485760,
'backupCount': 20,
'encoding': 'utf8'},
'info_file_handler': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'filename': '/var/log/yosai/info.log',
'formatter': 'json_format',
'maxBytes': 10485760,
'backupCount': 20,
'encoding': 'utf8'},
'error_file_handler': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'ERROR',
'filename': '/var/log/yosai/errors.log',
'formatter': 'json_format',
'maxBytes': 10485760,
'backupCount': 20,
'encoding': 'utf8'}
},
'loggers': {
'yosai_logger': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False
}
},
'root': {
'level': 'DEBUG',
'handlers': ['console', 'debug_file_handler', 'info_file_handler',
'error_file_handler']
}
}
config.dictConfig(default_logging)
```
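A short usage note: the rotating file handlers above write under /var/log/yosai/, so that directory must exist and be writable before configuration is applied.
```python
# Illustrative usage; dictConfig raises at setup time if /var/log/yosai/ is missing.
import logging

load_logconfig()
logger = logging.getLogger('yosai_logger')
logger.debug('logging configured')
```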
#### File: core/mgt/abcs.py
```python
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class RememberMeManager(object):
"""
A RememberMeManager is responsible for remembering a Subject's identity
across that Subject's sessions within the application.
"""
@abstractmethod
def get_remembered_identifiers(self, subject_context):
"""
Based on the specified subject context map being used to build a
Subject instance, returns any previously remembered identifier for the
subject for automatic identity association (aka 'Remember Me').
The context map is usually populated by a Subject.Builder
implementation. See the SubjectFactory class constants for
Yosai's known map keys.
:param subject_context: the contextual data, usually provided by a
Builder implementation, that is being used to
construct a Subject instance
:returns: the remembered identifier or None if none could be acquired
"""
pass
@abstractmethod
def forget_identity(self, subject_context):
"""
Forgets any remembered identity corresponding to the subject context
map being used to build a subject instance.
The context map is usually populated by a Subject.Builder
implementation.
See the SubjectFactory class constants for Shiro's known map keys.
:param subject_context: the contextual data, usually provided by a
Subject.Builder implementation, that
is being used to construct a Subject instance
"""
pass
@abstractmethod
def on_successful_login(self, subject, authc_token, account):
"""
Reacts to a successful authentication attempt, typically saving the
identifier to be retrieved ('remembered') for future system access.
:param subject: the subject that executed a successful authentication
attempt
:param authc_token: the authentication token submitted resulting in a
successful authentication attempt
:param account: the account returned as a result of the
successful authentication attempt
"""
pass
@abstractmethod
def on_failed_login(self, subject, token, auth_exc):
"""
Reacts to a failed authentication attempt, typically by forgetting any
previously remembered identifier for the Subject.
:param subject: the subject that executed the failed authentication
attempt
:param token: the authentication token submitted resulting in the
failed authentication attempt
:param auth_exc: the authentication exception thrown as a result of
the failed authentication attempt
"""
pass
@abstractmethod
def on_logout(self, subject):
"""
Reacts to a Subject logging out of the application, typically by
forgetting any previously remembered identifier for the Subject.
:param subject: the subject logging out
"""
pass
@six.add_metaclass(ABCMeta)
class SecurityManager(object):
"""
A SecurityManager executes ALL security operations for ALL Subjects (aka users)
across a single application.
The interface itself primarily exists as a convenience - it extends the
Authenticator, Authorizer, and SessionManager abc-interfaces, thereby
consolidating these behaviors into a single point of reference. For most
Yosai usages, this simplifies configuration and tends to be a more
convenient approach than referencing Authenticator, Authorizer, and
SessionManager instances individually. Instead, one only needs to interact
with a single SecurityManager instance.
In addition to the above three interfaces, this interface provides a number
of methods supporting the behavior of Subject(s). A Subject executes
authentication, authorization, and session operations for a *single* user,
and as such can only be managed by A SecurityManager that is aware of all
three functions. The three parent interfaces on the other hand do not
'know' about Subject(s) so as to ensure a clean separation of concerns.
Usage Note
----------
In actuality, the large majority of application programmers won't interact
with a SecurityManager very often, if at all. *Most* application
programmers only care about security operations for the currently executing
user, usually obtained from the yosai.subject attribute.
Framework developers, however, might find working directly with a
SecurityManager useful.
"""
@abstractmethod
def login(self, subject, authc_token):
"""
Logs in the specified Subject using the given authc_token, returning
an updated Subject instance that reflects the authenticated state when
authentication is successful or raising an AuthenticationException if
authentication is not.
Note that most application developers should probably not call this
method directly unless they have a good reason for doing so. The
preferred way to log in a Subject is to call subject.login(authc_token)
after acquiring the Subject from yosai.subject.
Framework developers, however, may find that directly calling this
method useful in certain cases.
:param subject: the subject against which the authentication attempt
will occur
:param authc_token: the token representing the Subject's
identifier(s) and credential(s)
:returns: the subject instance reflecting the authenticated state after
a successful attempt
:raises AuthenticationException: if the login attempt failed
"""
pass
@abstractmethod
def logout(self, subject):
"""
Logs out the specified Subject from the system.
Note that most application developers should not call this method
unless they have a good reason for doing so. The preferred way to
logout a Subject is to call Subject.logout(), and not call the
SecurityManager's logout directly.
Framework developers, however, may find directly calling this method
useful in certain cases.
:param subject: the subject to log out
"""
pass
@abstractmethod
def create_subject(self, authc_token=None, account_id=None, existing_subject=None, subject_context=None):
"""
Creates a Subject instance that reflects the specified contextual data.
The context can be anything needed by this SecurityManager to
construct a Subject instance. Most Yosai end-users will never call
this method -- it exists primarily for framework development and to
support any underlying custom SubjectFactory implementations
that may be used by the SecurityManager.
Usage
----------
After calling this method, the returned instance is *not* bound to the
application for further use. Callers are expected to know that
Subject instances have local scope only and any other further use
beyond the calling method must be managed explicitly.
:param account_id:
:param authc_token:
:param existing_subject:
:param subject_context: any data needed to direct how the Subject should be
constructed
:returns: the Subject instance that reflects the specified
initialization data
"""
pass
```
#### File: core/mgt/mgt.py
```python
import copy
from yosaipy2.core.utils.utils import get_logger
from cryptography.fernet import Fernet
from abc import abstractmethod
from yosaipy2.core import (
AdditionalAuthenticationRequired,
AuthenticationException,
DefaultAuthenticator,
DelegatingSubject,
NativeSessionManager,
SessionKey,
SubjectContext,
SubjectStore,
InvalidSessionException,
ModularRealmAuthorizer,
RememberMeSettings,
event_bus,
mgt_abcs,
)
class AbstractRememberMeManager(mgt_abcs.RememberMeManager):
"""
Abstract implementation of the ``RememberMeManager`` interface that handles
serialization and encryption of the remembered user identity.
The remembered identity storage location and details are left to
subclasses.
Default encryption key
-----------------------
This implementation uses the Fernet API from PyCA's cryptography for
symmetric encryption. As per the documentation, Fernet uses AES in CBC mode
with a 128-bit key for encryption and uses PKCS7 padding:
https://cryptography.io/en/stable/fernet/
It also uses a default, generated symmetric key to both encrypt and decrypt
data. As AES is a symmetric cipher, the same key is used to both encrypt
and decrypt data, BUT NOTE:
Because Yosai is an open-source project, if anyone knew that you were
using Yosai's default key, they could download/view the source, and with
enough effort, reconstruct the key and decode encrypted data at will.
Of course, this key is only really used to encrypt the remembered
``IdentifierCollection``, which is typically a user id or username. So if you
do not consider that sensitive information, and you think the default key
still makes things 'sufficiently difficult', then you can ignore this
issue.
However, if you do feel this constitutes sensitive information, it is
recommended that you provide your own key and set it via the cipher_key
property attribute to a key known only to your application,
guaranteeing that no third party can decrypt your data.
You can generate your own key by importing fernet and calling its
generate_key method:
>>> from cryptography.fernet import Fernet
>>> key = Fernet.generate_key()
your key will be a byte string that looks like this:
b'<KEY>
copy and paste YOUR newly generated byte string, excluding the
bytestring notation, into its respective place in /conf/yosai.core.settings.json
following this format:
default_cipher_key = "<KEY>
"""
def __init__(self, settings):
self._logger = get_logger()
default_cipher_key = RememberMeSettings(settings).default_cipher_key
# new to yosai.core.
self.serialization_manager = None
self.encryption_cipher_key = default_cipher_key
self.decryption_cipher_key = default_cipher_key
@abstractmethod
def forget_identity(self, subject):
"""
Forgets (removes) any remembered identity data for the specified
Subject instance.
:param subject: the subject instance for which identity data should be
forgotten from the underlying persistence mechanism
"""
pass
def on_successful_login(self, subject, authc_token, account_id):
"""
Reacts to the successful login attempt by first always
forgetting any previously stored identity. Then if the authc_token
is a ``RememberMe`` type of token, the associated identity
will be remembered for later retrieval during a new user session.
:param subject: the subject whose identifying attributes are being
remembered
:param authc_token: the token that resulted in a successful
authentication attempt
:param account_id: id of authenticated account
"""
# always clear any previous identity:
self.forget_identity(subject)
# now save the new identity:
if authc_token.is_remember_me:
self.remember_identity(subject, authc_token, account_id)
else:
msg = ("AuthenticationToken did not indicate that RememberMe is "
"requested. RememberMe functionality will not be executed "
"for corresponding account.")
self._logger.debug(msg)
def remember_identity(self, subject, authc_token, account_id):
"""
Yosai consolidates rememberIdentity, an overloaded method in java,
to a method that will use an identifier-else-account logic.
Remembers a subject-unique identity for retrieval later. This
implementation first resolves the exact identifying attributes to
remember. It then remembers these identifying attributes by calling
remember_identity(Subject, IdentifierCollection)
:param subject: the subject for which the identifying attributes are
being remembered
:param authc_token: ignored in the AbstractRememberMeManager
:param account_id: the account id of authenticated account
"""
try:
identifiers = self.get_identity_to_remember(subject, account_id)
except AttributeError:
msg = "Neither account_id nor identifier arguments passed"
raise AttributeError(msg)
encrypted = self.convert_identifiers_to_bytes(identifiers)
self.remember_encrypted_identity(subject, encrypted)
def get_identity_to_remember(self, subject, account_id):
"""
Returns the account's identifier and ignores the subject argument
:param subject: the subject whose identifiers are remembered
:param account_id: the account resulting from the successful authentication attempt
:returns: the IdentifierCollection to remember
"""
# This is a placeholder. A more meaningful logic is implemented by subclasses
return account_id
def convert_identifiers_to_bytes(self, identifiers):
"""
Encryption requires a binary type as input, so this method converts
the identifier collection object to one.
:type identifiers: a serializable IdentifierCollection object
:returns: a bytestring
"""
# serializes to bytes by default:
return self.encrypt(self.serialization_manager.serialize(identifiers))
@abstractmethod
def remember_encrypted_identity(self, subject, encrypted):
"""
Persists the identity bytes to a persistent store
:param subject: the Subject for whom the identity is being serialized
:param encrypted: the serialized bytes to be persisted.
"""
pass
def get_remembered_identifiers(self, subject_context):
identifiers = None
try:
encrypted = self.get_remembered_encrypted_identity(subject_context)
if encrypted:
identifiers = self.convert_bytes_to_identifiers(encrypted,
subject_context)
except Exception as ex:
identifiers = \
self.on_remembered_identifiers_failure(ex, subject_context)
return identifiers
@abstractmethod
def get_remembered_encrypted_identity(self, subject_context):
"""
Based on the given subject context data, retrieves the previously
persisted serialized identity, or None if there is no available data.
:param subject_context: the contextual data, that
is being used to construct a Subject instance.
:returns: the previously persisted serialized identity, or None if
no such data can be acquired for the Subject
"""
pass
def convert_bytes_to_identifiers(self, encrypted, subject_context):
"""
If a cipher_service is available, it will be used to first decrypt the
serialized message. Then, the bytes are deserialized and returned.
:param encrypted: the bytes to decrypt and then deserialize
:param subject_context: the contextual data, that is being
used to construct a Subject instance
:returns: the de-serialized identifier
"""
# unlike Shiro, Yosai assumes that the message is encrypted:
decrypted = self.decrypt(encrypted)
return self.serialization_manager.deserialize(decrypted)
def on_remembered_identifiers_failure(self, exc, subject_context):
"""
Called when an exception is thrown while trying to retrieve identifier.
The default implementation logs a debug message and forgets ('unremembers')
the problem identity by calling forget_identity(subject_context) and
then immediately re-raises the exception to allow the calling
component to react accordingly.
This method implementation never returns an object - it always rethrows,
but can be overridden by subclasses for custom handling behavior.
This most commonly would be called when an encryption key is updated
and old identifier are retrieved that have been encrypted with the
previous key.
:param exc: the exception that was thrown
:param subject_context: the contextual data that is being
used to construct a Subject instance
:raises: the original Exception passed is propagated in all cases
"""
msg = ("There was a failure while trying to retrieve remembered "
"identifier. This could be due to a configuration problem or "
"corrupted identifier. This could also be due to a recently "
"changed encryption key. The remembered identity will be "
"forgotten and not used for this request. ", exc)
self._logger.debug(msg)
self.forget_identity(subject_context)
# propagate - security manager implementation will handle and warn
# appropriately:
raise exc
def encrypt(self, serialized):
"""
Encrypts the serialized message using Fernet
:param serialized: the serialized object to encrypt
:type serialized: bytes
:returns: an encrypted bytes returned by Fernet
"""
fernet = Fernet(self.encryption_cipher_key)
return fernet.encrypt(serialized)
def decrypt(self, encrypted):
"""
decrypts the encrypted message using Fernet
:param encrypted: the encrypted message
:returns: the decrypted, serialized identifier collection
"""
fernet = Fernet(self.decryption_cipher_key)
return fernet.decrypt(encrypted)
def on_failed_login(self, subject, authc_token, ae):
"""
Reacts to a failed login by immediately forgetting any previously
remembered identity. This is an additional security feature to prevent
        any remnant identity data from being retained in case the
authentication attempt is not being executed by the expected user.
:param subject: the subject which executed the failed login attempt
:param authc_token: the authentication token resulting in a failed
login attempt - ignored by this implementation
:param ae: the exception thrown as a result of the failed login
attempt - ignored by this implementation
"""
self.forget_identity(subject)
def on_logout(self, subject):
"""
Reacts to a subject logging out of the application and immediately
forgets any previously stored identity and returns.
:param subject: the subject logging out
"""
self.forget_identity(subject)
# also known as ApplicationSecurityManager in Shiro 2.0 alpha:
class NativeSecurityManager(mgt_abcs.SecurityManager):
def __init__(self,
yosai,
settings,
realms=None,
cache_handler=None,
authenticator=None,
authorizer=ModularRealmAuthorizer(),
serialization_manager=None,
session_manager=None,
remember_me_manager=None,
subject_store=SubjectStore()):
self._logger = get_logger()
self.yosai = yosai
self.subject_store = subject_store
self.realms = realms
self.remember_me_manager = remember_me_manager
if not session_manager:
session_manager = NativeSessionManager(settings)
self.session_manager = session_manager
self.authorizer = authorizer
if not authenticator:
authenticator = DefaultAuthenticator(settings)
self.authenticator = authenticator
if serialization_manager and self.remember_me_manager:
self.remember_me_manager.serialization_manager = serialization_manager
self.apply_event_bus(event_bus)
self.apply_cache_handler(cache_handler)
self.apply_realms()
def apply_cache_handler(self, cache_handler):
for realm in self.realms:
if hasattr(realm, 'cache_handler'): # implies cache support
realm.cache_handler = cache_handler
if hasattr(self.session_manager, 'apply_cache_handler'):
self.session_manager.apply_cache_handler(cache_handler)
def apply_event_bus(self, eventbus):
self.authenticator.event_bus = eventbus
self.authorizer.event_bus = eventbus
self.session_manager.apply_event_bus(eventbus)
def apply_realms(self):
self.authenticator.init_realms(self.realms)
self.authorizer.init_realms(self.realms)
def is_permitted(self, identifiers, permission_s):
"""
:type identifiers: SimpleIdentifierCollection
:param permission_s: a collection of 1..N permissions
:type permission_s: List of Permission object(s) or String(s)
:returns: a List of tuple(s), containing the Permission and a Boolean
indicating whether the permission is granted
"""
return self.authorizer.is_permitted(identifiers, permission_s)
def is_permitted_collective(self, identifiers, permission_s, logical_operator):
"""
:type identifiers: SimpleIdentifierCollection
:param permission_s: a collection of 1..N permissions
:type permission_s: List of Permission object(s) or String(s)
:param logical_operator: indicates whether all or at least one
permission check is true (any)
:type: any OR all (from python standard library)
:returns: a Boolean
"""
return self.authorizer.is_permitted_collective(identifiers,
permission_s,
logical_operator)
def check_permission(self, identifiers, permission_s, logical_operator):
"""
:type identifiers: SimpleIdentifierCollection
:param permission_s: a collection of 1..N permissions
:type permission_s: List of Permission objects or Strings
:param logical_operator: indicates whether all or at least one
permission check is true (any)
:type: any OR all (from python standard library)
:returns: a List of Booleans corresponding to the permission elements
"""
return self.authorizer.check_permission(identifiers,
permission_s,
logical_operator)
def has_role(self, identifiers, role_s):
"""
:type identifiers: SimpleIdentifierCollection
:param role_s: 1..N role identifiers (strings)
:type role_s: Set of Strings
:returns: a set of tuple(s), containing the role and a Boolean
indicating whether the user is a member of the Role
"""
return self.authorizer.has_role(identifiers, role_s)
def has_role_collective(self, identifiers, role_s, logical_operator):
"""
:type identifiers: SimpleIdentifierCollection
:param logical_operator: indicates whether all or at least one
permission check is true (any)
:type: any OR all (from python standard library)
:param role_s: 1..N role identifier
:type role_s: a Set of Strings
:returns: a Boolean
"""
return self.authorizer.has_role_collective(identifiers,
role_s, logical_operator)
def check_role(self, identifiers, role_s, logical_operator):
"""
:type identifiers: SimpleIdentifierCollection
:param role_s: 1..N role identifier
:type role_s: a Set of Strings
:param logical_operator: indicates whether all or at least one
permission check is true (any)
:type: any OR all (from python standard library)
:raises UnauthorizedException: if Subject not assigned to all roles
"""
return self.authorizer.check_role(identifiers,
role_s, logical_operator)
"""
* ===================================================================== *
* SessionManager Methods *
* ===================================================================== *
"""
def start(self, session_context):
return self.session_manager.start(session_context)
def get_session(self, session_key):
return self.session_manager.get_session(session_key)
"""
* ===================================================================== *
* SecurityManager Methods *
* ===================================================================== *
"""
# existing_subject is used by WebSecurityManager:
def create_subject_context(self, existing_subject):
if not hasattr(self, 'yosai'):
msg = "SecurityManager has no Yosai attribute set."
raise AttributeError(msg)
return SubjectContext(self.yosai, self)
def create_subject(self,
authc_token=None,
account_id=None,
existing_subject=None,
subject_context=None):
"""
Creates a ``Subject`` instance for the user represented by the given method
arguments.
It is an overloaded method, due to porting java to python, and is
consequently highly likely to be refactored.
It gets called in one of two ways:
1) when creating an anonymous subject, passing create_subject
a subject_context argument
        2) following a successful login, passing all but the context argument
This implementation functions as follows:
- Ensures that the ``SubjectContext`` exists and is as populated as it can be,
using heuristics to acquire data that may not have already been available
to it (such as a referenced session or remembered identifiers).
- Calls subject_context.do_create_subject to perform the Subject
instance creation
- Calls subject.save to ensure the constructed Subject's state is
accessible for future requests/invocations if necessary
- Returns the constructed Subject instance
:type authc_token: subject_abcs.AuthenticationToken
:param account_id: the identifiers of a newly authenticated user
:type account_id: SimpleIdentifierCollection
:param existing_subject: the existing Subject instance that initiated the
authentication attempt
:type subject_context: subject_abcs.Subject
:type subject_context: subject_abcs.SubjectContext
:returns: the Subject instance that represents the context and session
data for the newly authenticated subject
"""
        if subject_context is None:  # this means a successful login just happened
# passing existing_subject is new to yosai:
context = self.create_subject_context(existing_subject)
context.authenticated = True
context.authentication_token = authc_token
context.account_id = account_id
if existing_subject:
context.subject = existing_subject
else:
            context = copy.copy(subject_context)  # is this necessary? TBD.
context = self.ensure_security_manager(context)
context = self.resolve_session(context)
context = self.resolve_identifiers(context)
subject = self.do_create_subject(context) # DelegatingSubject
# save this subject for future reference if necessary:
# (this is needed here in case remember_me identifiers were resolved
# and they need to be stored in the session, so we don't constantly
# re-hydrate the remember_me identifier_collection on every operation).
self.save(subject)
return subject
def update_subject_identity(self, account_id, subject):
subject.identifiers = account_id
self.save(subject)
return subject
def remember_me_successful_login(self, authc_token, account_id, subject):
rmm = self.remember_me_manager
if rmm is not None:
try:
rmm.on_successful_login(subject, authc_token, account_id)
except Exception:
msg = ("Delegate RememberMeManager instance of type [" +
rmm.__class__.__name__ + "] threw an exception "
+ "during on_successful_login. RememberMe services "
+ "will not be performed for account_id [" + str(account_id) +
"].")
self._logger.warning(msg, exc_info=True)
else:
msg = ("This " + rmm.__class__.__name__ +
" instance does not have a [RememberMeManager] instance " +
"configured. RememberMe services will not be performed " +
"for account_id [" + str(account_id) + "].")
self._logger.info(msg)
def remember_me_failed_login(self, authc_token, authc_exc, subject):
rmm = self.remember_me_manager
if rmm is not None:
try:
rmm.on_failed_login(subject, authc_token, authc_exc)
except Exception:
msg = ("Delegate RememberMeManager instance of type "
"[" + rmm.__class__.__name__ + "] threw an exception "
"during on_failed_login for AuthenticationToken [" +
str(authc_token) + "].")
self._logger.warning(msg, exc_info=True)
def remember_me_logout(self, subject):
rmm = self.remember_me_manager
if rmm is not None:
try:
rmm.on_logout(subject)
except Exception as ex:
msg = ("Delegate RememberMeManager instance of type [" +
rmm.__class__.__name__ + "] threw an exception during "
"on_logout for subject with identifiers [{identifiers}]".
format(identifiers=subject.identifiers if subject else None))
self._logger.warning(msg, exc_info=True)
def login(self, subject, authc_token):
"""
Login authenticates a user using an AuthenticationToken. If authentication is
successful AND the Authenticator has determined that authentication is
complete for the account, login constructs a Subject instance representing
the authenticated account's identity. Once a subject instance is constructed,
it is bound to the application for subsequent access before being returned
to the caller.
If login successfully authenticates a token but the Authenticator has
determined that subject's account isn't considered authenticated,
the account is configured for multi-factor authentication.
Sessionless environments must pass all authentication tokens to login
at once.
:param subject:
:param authc_token: the authenticationToken to process for the login attempt
:type authc_token: authc_abcs.authenticationToken
:returns: a Subject representing the authenticated user
:raises AuthenticationException: if there is a problem authenticating
the specified authc_token
:raises AdditionalAuthenticationRequired: during multi-factor authentication
when additional tokens are required
"""
try:
# account_id is a SimpleIdentifierCollection
account_id = self.authenticator.authenticate_account(subject.identifiers,
authc_token)
# implies multi-factor authc not complete:
except AdditionalAuthenticationRequired as exc:
# identity needs to be accessible for subsequent authentication:
self.update_subject_identity(exc.account_id, subject)
# no need to propagate account further:
raise AdditionalAuthenticationRequired
except AuthenticationException as authc_ex:
try:
self.on_failed_login(authc_token, authc_ex, subject)
except Exception:
msg = ("on_failed_login method raised an exception. Logging "
"and propagating original AuthenticationException.")
self._logger.info(msg, exc_info=True)
raise
logged_in = self.create_subject(authc_token=authc_token,
account_id=account_id,
existing_subject=subject)
self.on_successful_login(authc_token, account_id, logged_in)
return logged_in
def on_successful_login(self, authc_token, account_id, subject):
self.remember_me_successful_login(authc_token, account_id, subject)
def on_failed_login(self, authc_token, authc_exc, subject):
self.remember_me_failed_login(authc_token, authc_exc, subject)
def before_logout(self, subject):
self.remember_me_logout(subject)
def do_create_subject(self, subject_context):
"""
By the time this method is invoked, all possible
``SubjectContext`` data (session, identifiers, et. al.) has been made
accessible using all known heuristics.
:returns: a Subject instance reflecting the data in the specified
SubjectContext data map
"""
security_manager = subject_context.resolve_security_manager()
session = subject_context.resolve_session()
session_creation_enabled = subject_context.session_creation_enabled
        # passing the session arg is new to yosai, eliminating redundant
# get_session calls:
identifiers = subject_context.resolve_identifiers(session)
remembered = getattr(subject_context, 'remembered', False)
authenticated = subject_context.resolve_authenticated(session)
host = subject_context.resolve_host(session)
return DelegatingSubject(identifiers=identifiers,
remembered=remembered,
authenticated=authenticated,
host=host,
session=session,
session_creation_enabled=session_creation_enabled,
security_manager=security_manager)
def save(self, subject):
"""
Saves the subject's state to a persistent location for future reference.
This implementation merely delegates saving to the internal subject_store.
"""
self.subject_store.save(subject)
def delete(self, subject):
"""
This method removes (or 'unbinds') the Subject's state from the
application, typically called during logout.
This implementation merely delegates deleting to the internal subject_store.
:param subject: the subject for which state will be removed
"""
self.subject_store.delete(subject)
def ensure_security_manager(self, subject_context):
"""
Determines whether there is a ``SecurityManager`` instance in the context,
and if not, adds 'self' to the context. This ensures that do_create_subject
will have access to a ``SecurityManager`` during Subject construction.
:param subject_context: the subject context data that may contain a
SecurityManager instance
:returns: the SubjectContext
"""
if subject_context.resolve_security_manager() is not None:
msg = ("Subject Context resolved a security_manager "
"instance, so not re-assigning. Returning.")
self._logger.debug(msg)
return subject_context
msg = ("No security_manager found in context. Adding self "
"reference.")
self._logger.debug(msg)
subject_context.security_manager = self
return subject_context
def resolve_session(self, subject_context):
"""
This method attempts to resolve any associated session based on the
context and returns a context that represents this resolved Session to
ensure it may be referenced, if needed, by the invoked do_create_subject
that performs actual ``Subject`` construction.
If there is a ``Session`` already in the context (because that is what the
caller wants to use for Subject construction) or if no session is
resolved, this method effectively does nothing, returning an
unmodified context as it was received by the method.
:param subject_context: the subject context data that may resolve a
Session instance
:returns: the context
"""
if subject_context.resolve_session() is not None:
msg = "Context already contains a session. Returning."
self._logger.debug(msg)
return subject_context
try:
# Context couldn't resolve it directly, let's see if we can
# since we have direct access to the session manager:
session = self.resolve_context_session(subject_context)
# if session is None, given that subject_context.session
# is None there is no harm done by setting it to None again
subject_context.session = session
except InvalidSessionException:
msg = ("Resolved subject_subject_context context session is "
"invalid. Ignoring and creating an anonymous "
"(session-less) Subject instance.")
self._logger.debug(msg, exc_info=True)
return subject_context
def resolve_context_session(self, subject_context):
session_key = self.get_session_key(subject_context)
if session_key is not None:
return self.get_session(session_key)
return None
def get_session_key(self, subject_context):
session_id = subject_context.session_id
if session_id is not None:
return SessionKey(session_id)
return None
# yosai.core.omits is_empty method
def resolve_identifiers(self, subject_context):
"""
ensures that a subject_context has identifiers and if it doesn't will
attempt to locate them using heuristics
"""
session = subject_context.session
identifiers = subject_context.resolve_identifiers(session)
if not identifiers:
msg = ("No identity (identifier_collection) found in the "
"subject_context. Looking for a remembered identity.")
self._logger.debug(msg)
identifiers = self.get_remembered_identity(subject_context)
if identifiers:
msg = ("Found remembered IdentifierCollection. Adding to the "
"context to be used for subject construction.")
self._logger.debug(msg)
subject_context.identifiers = identifiers
subject_context.remembered = True
else:
msg = ("No remembered identity found. Returning original "
"context.")
self._logger.debug(msg)
return subject_context
def create_session_context(self, subject_context):
session_context = {}
if not subject_context.is_empty:
session_context.update(subject_context.__dict__)
session_id = subject_context.session_id
if session_id:
session_context['session_id'] = session_id
host = subject_context.resolve_host(None)
if host:
session_context['host'] = host
return session_context
def logout(self, subject):
"""
Logs out the specified Subject from the system.
Note that most application developers should not call this method unless
they have a good reason for doing so. The preferred way to logout a
Subject is to call ``Subject.logout()``, not by calling ``SecurityManager.logout``
directly. However, framework developers might find calling this method
directly useful in certain cases.
:param subject: the subject to log out:
:type subject: subject_abcs.Subject
"""
if subject is None:
msg = "Subject argument cannot be None."
raise ValueError(msg)
self.before_logout(subject)
identifiers = copy.copy(subject.identifiers) # copy is new to yosai
if identifiers:
msg = ("Logging out subject with primary identifier {0}".format(
identifiers.primary_identifier))
self._logger.debug(msg)
try:
# this removes two internal attributes from the session:
self.delete(subject)
except Exception:
msg = "Unable to cleanly unbind Subject. Ignoring (logging out)."
self._logger.debug(msg, exc_info=True)
finally:
try:
self.stop_session(subject)
except Exception:
msg2 = ("Unable to cleanly stop Session for Subject. "
"Ignoring (logging out).")
self._logger.debug(msg2, exc_info=True)
def stop_session(self, subject):
session = subject.get_session(False)
if session:
session.stop(subject.identifiers)
def get_remembered_identity(self, subject_context):
"""
Using the specified subject context map intended to build a ``Subject``
instance, returns any previously remembered identifiers for the subject
for automatic identity association (aka 'Remember Me').
"""
rmm = self.remember_me_manager
if rmm is not None:
try:
return rmm.get_remembered_identifiers(subject_context)
except Exception as ex:
msg = ("Delegate RememberMeManager instance of type [" +
rmm.__class__.__name__ + "] raised an exception during "
"get_remembered_identifiers().")
self._logger.warning(msg, exc_info=True)
return None
```
#### File: core/mgt/mgt_settings.py
```python
from yosaipy2.core import (
maybe_resolve,
)
class RememberMeSettings(object):
def __init__(self, settings):
rmm_config = settings.REMEMBER_ME_CONFIG
self.default_cipher_key = rmm_config.get('default_cipher_key').encode()
class SecurityManagerSettings(object):
"""
SecurityManagerSettings is a settings proxy. It is new for Yosai.
It obtains security-manager related configuration from Yosai's global
settings, defaulting values when necessary.
"""
def __init__(self, settings):
self.settings = settings
manager_config = settings.SECURITY_MANAGER_CONFIG
self.security_manager = \
maybe_resolve(manager_config.get('security_manager',
'yosaipy2.core.NativeSecurityManager'))
self.attributes = self.resolve_attributes(manager_config.get('attributes'))
def resolve_attributes(self, attributes):
serializer = attributes.get('serializer', 'cbor')
realms = self.resolve_realms(attributes)
cache_handler = self.resolve_cache_handler(attributes)
session_attributes = self.resolve_session_attributes(attributes)
return {'serializer': serializer,
'realms': realms,
'cache_handler': cache_handler,
'session_attributes': session_attributes
}
def resolve_cache_handler(self, attributes):
return maybe_resolve(attributes.get('cache_handler'))
def resolve_session_attributes(self, attributes):
return maybe_resolve(attributes.get('session_attributes'))
def resolve_realms(self, attributes):
"""
The format of realm settings is:
{'name_of_realm':
{'cls': 'location to realm class',
'account_store': 'location to realm account_store class'}}
- 'name of realm' is a label used for internal tracking
- 'cls' and 'account_store' are static key names and are not to be changed
- the location of classes should follow dotted notation: pkg.module.class
"""
realms = []
for realm, realm_attributes in attributes['realms'].items():
realm_cls = maybe_resolve(realm)
account_store_cls = maybe_resolve(realm_attributes['account_store'])
verifiers = {}
authc_verifiers = realm_attributes.get('authc_verifiers')
if authc_verifiers:
if isinstance(authc_verifiers, list):
authc_verifiers_cls = tuple(maybe_resolve(verifier)(self.settings) for
verifier in authc_verifiers)
else:
authc_verifiers_cls = tuple([maybe_resolve(authc_verifiers)(self.settings)])
verifiers['authc_verifiers'] = authc_verifiers_cls
authz_verifier = realm_attributes.get('authz_verifier')
if authz_verifier:
permission_verifier_cls = maybe_resolve(authz_verifier)
if permission_verifier_cls:
verifiers['permission_verifier'] = maybe_resolve(permission_verifier_cls)()
realms.append([realm_cls, account_store_cls, verifiers])
return realms
def __repr__(self):
return "SecurityManagerSettings(security_manager={0}, attributes={1})". \
format(self.security_manager, self.attributes)
```
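For illustration, a 'realms' attribute in the shape `resolve_realms` iterates over might look like the sketch below; every dotted path is a placeholder, not a real module.
```python
# Placeholder realm configuration matching resolve_realms(); all dotted paths are invented.
attributes = {
    'realms': {
        'myapp.realms.DatastoreRealm': {
            'account_store': 'myapp.realms.AlchemyAccountStore',
            'authc_verifiers': ['myapp.verifiers.PasswordVerifier'],
            'authz_verifier': 'myapp.verifiers.PermissionVerifier',
        }
    }
}
```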
#### File: core/session/schema.py
```python
from marshmallow import fields, post_load, pre_dump
from session import SimpleSession
from yosaipy2.core.serialize.serializer import BaseSchema
from yosaipy2.core.subject.schema import SimpleIdentifierSchema
from yosaipy2.core.utils.utils import get_logger
from copy import copy
logger = get_logger()
class SimpleSessionSchema(BaseSchema):
under_type = SimpleSession
session_id = fields.String(allow_none=True)
start_timestamp = fields.Integer(allow_none=True)
stop_timestamp = fields.Integer(allow_none=True)
last_access_time = fields.Integer(allow_none=True)
idle_timeout = fields.Integer(allow_none=True)
absolute_timeout = fields.Integer(allow_none=True)
is_expired = fields.Boolean(allow_none=True)
host = fields.String(allow_none=True)
internal_attributes = fields.Dict(allow_none=True)
attributes = fields.Dict(allow_none=True)
@pre_dump
def encode_attribute(self, data):
# type:(SimpleSession) -> SimpleSession
internal_attributes = data.internal_attributes
if 'identifiers_session_key' not in internal_attributes:
return data
elif not internal_attributes['identifiers_session_key']:
return data
schema = SimpleIdentifierSchema()
result = schema.dumps(internal_attributes['identifiers_session_key'])
if result.errors:
mesg = "encode internal attribute error: {}".format(result.errors)
logger.error(mesg)
raise Exception(mesg)
copied = copy(internal_attributes)
copied['identifiers_session_key'] = result.data
copied_data = copy(data)
copied_data.internal_attributes = copied
return copied_data
@post_load
def make_session(self, data):
s = SimpleSession(data['absolute_timeout'], data['idle_timeout'], data['host'])
for k in data:
if hasattr(s, k):
setattr(s, k, data[k])
s.session_id = data['session_id']
result = self._decode_internal(data['internal_attributes'])
if not result:
return s
s.set_internal_attribute('identifiers_session_key', result)
return s
@staticmethod
def _decode_internal(internal_attributes):
if 'identifiers_session_key' not in internal_attributes:
return None
elif not internal_attributes['identifiers_session_key']:
return None
schema = SimpleIdentifierSchema()
result = schema.loads(internal_attributes['identifiers_session_key'])
if result.errors:
mesg = "decode internal attributes error: {}".format(result.errors)
logger.error(mesg)
raise Exception(mesg)
return result.data
```
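A deserialization sketch using the marshmallow 2.x result objects (`.data`/`.errors`) that the schema above already relies on, assuming `BaseSchema` behaves like a standard marshmallow Schema; the payload values are examples and the timeouts are in milliseconds.
```python
# Illustrative loads() half of the round trip: rebuild a SimpleSession from a JSON payload.
payload = ('{"session_id": "abc123", "absolute_timeout": 1800000, '
           '"idle_timeout": 900000, "host": "127.0.0.1", '
           '"internal_attributes": {}, "attributes": {}}')
restored = SimpleSessionSchema().loads(payload).data  # SimpleSession built by make_session
```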
#### File: core/session/session_settings.py
```python
import datetime
class SessionSettings(object):
"""
SessionSettings is a settings proxy. It is new for Yosai.
It obtains the session configuration from Yosai's global settings
and default values if there aren't any.
"""
def __init__(self, settings):
# omitted millisecond conversions
session_config = settings.SESSION_CONFIG
        timeout_config = session_config.get('session_timeout') or {}
        validation_config = session_config.get('session_validation') or {}
# convert to milliseconds:
self.absolute_timeout = timeout_config.get('absolute_timeout', 1800) * 1000 # def:30min
self.idle_timeout = timeout_config.get('idle_timeout', 900) * 1000 # def:15min
self.validation_scheduler_enable = validation_config.get('scheduler_enabled', True)
self.interval = validation_config.get('time_interval', 3600) # def:1hr
self.validation_time_interval = datetime.timedelta(seconds=self.interval)
def __repr__(self):
mesg = "SessionSettings(absolute_timeout={0}, idle_timeout={1}, " \
"validation_scheduler_enable={2}, validation_time_interval={3})"
return (mesg.format(
self.absolute_timeout,
self.idle_timeout,
self.validation_scheduler_enable,
self.validation_time_interval
))
```
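A hypothetical stand-in object showing the settings shape `SessionSettings` expects; the values are examples only, and real deployments load them from the YAML settings file.
```python
# Hypothetical settings stand-in; values are illustrative.
class _ExampleSettings:
    SESSION_CONFIG = {
        'session_timeout': {'absolute_timeout': 1800, 'idle_timeout': 900},
        'session_validation': {'scheduler_enabled': True, 'time_interval': 3600},
    }

print(SessionSettings(_ExampleSettings()))
```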
|
{
"source": "Jellybean940-afk/EasyFNBot",
"score": 3
}
|
#### File: EasyFNBot/Fortnite/Extras.py
```python
def isNaN(Nummer):
    try:
        int(Nummer)
        return False
    except (ValueError, TypeError):
        return True
def MtxCurrencyConverter(VBucks):
VBucks = int(VBucks)
Price = int(0)
    while VBucks >= 13500:
        Price += 99.99
        VBucks -= 13500
    while VBucks >= 7500:
        Price += 59.99
        VBucks -= 7500
    while VBucks >= 2800:
        Price += 24.99
        VBucks -= 2800
    while VBucks >= 1000:
        Price += 9.99
        VBucks -= 1000
    while VBucks > 0:
        Price += 9.99
        VBucks -= 1000
return round(Price, 2)
```
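A quick, illustrative use of the two helpers above; the import path follows the package layout shown in this repo and the amounts are arbitrary examples.
```python
# Illustrative only.
from Fortnite.Extras import isNaN, MtxCurrencyConverter

print(isNaN("1000"))                # False - parses as an int
print(isNaN("abc"))                 # True  - not a number
print(MtxCurrencyConverter(13500))  # 99.99  - one 13500 V-Bucks bundle
print(MtxCurrencyConverter(14500))  # 109.98 - 13500 bundle plus one 1000 bundle
```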
#### File: EasyFNBot/Fortnite/party.py
```python
import datetime,os,fortnitepy,time,requests,json,fn_api_wrapper
from Fortnite import colored
TimeInUTC = datetime.datetime.utcnow().strftime('%H:%M:%S')
fnapi = fn_api_wrapper.FortniteAPI()
async def event_party_invite(self, invitation):
if (self.Settings["JoinPartyOnInvitation"]) or (invitation.author.id in self.Settings["GiveFullAccessTo"]):
await self.user.party.me.set_emote('EID_Wave')
await invitation.accept()
async def event_party_member_promote(self, Member):
if self.Settings["ThanksOnPromote"]:
if Member.id == self.user.id:
await self.user.party.send("Thanks for promoting me ♥")
await self.user.party.me.set_emote("EID_TrueLove")
async def event_party_member_join(self, Member):
if Member.id == self.user.id:
if self.user.party.me.is_leader and self.user.party.member_count == 1:
os.system(colored.Colored(f'[BOT {self.user.display_name}] [{TimeInUTC}] Created Party', "green"))
else:
os.system(colored.Colored(f'[BOT {self.user.display_name}] [{TimeInUTC}] Joined Party', "green"))
try:
Level = int(self.Settings["SeasonLevel"])
await self.user.party.me.set_banner(season_level=Level)
except:
await self.user.party.me.set_banner(season_level=100)
await self.user.party.me.set_banner(icon=self.Settings["Banner"])
CID = fnapi.GetSkin(NameorId=self.Settings["DefaultSkin"],matchMethod="starts")
if CID.status != 200:
os.system(colored.Colored(f'Can\'t find {self.Settings["DefaultSkin"]}',"red"))
CID = "CID_022_Athena_Commando_F"
else:
CID = CID.id
await self.user.party.me.set_outfit(asset=CID)
if self.Settings["EmoteAfterJoiningParty"] and not self.Settings["EmoteName"] == "":
EID = fnapi.GetEmote(NameorId=self.Settings["EmoteName"],matchMethod="starts")
if EID.status != 200:
os.system(colored.Colored(f'Can\'t find {self.Settings["EmoteName"]}',"red"))
else:
await self.user.party.me.set_emote(asset=EID.id)
if self.Settings["DefaultBackpack"] != "":
BID = fnapi.GetBackpack(NameorId=self.Settings["DefaultBackpack"],matchMethod="starts")
            if BID.status != 200:
os.system(colored.Colored(f'Can\'t find {self.Settings["DefaultBackpack"]}',"red"))
else:
await self.user.party.me.set_backpack(asset=BID.id)
else:
os.system(colored.Colored(f'[BOT {self.user.display_name}] [{TimeInUTC}] {Member.display_name} Joined the Party', "green"))
await self.user.party.send(f"Welcome {Member.display_name} type !help if you need any help, if you want your own bot join my discord Server : https://discord.gg/2n2c7Pn")
```
|
{
"source": "JellybeanAsh/Fast-F1",
"score": 2
}
|
#### File: JellybeanAsh/Fast-F1/conftest.py
```python
import pytest
def pytest_addoption(parser):
parser.addoption(
"--f1-tel-api", action="store_true", default=False,
help="run tests which require connecting to the f1 telemetry api"
)
parser.addoption(
"--ergast-api", action="store_true", default=False,
help="run tests which require connecting to ergast"
)
parser.addoption(
"--lint-only", action="store_true", default=False,
help="only run linter and skip all tests"
)
parser.addoption(
"--prj-doc", action="store_true", default=False,
help="run only tests for general project structure and documentation"
)
parser.addoption(
"--slow", action="store_true", default=False,
help="run very slow tests too: this may take 30 minutes or more and will may multiple"
"hundred requests to the api server - usage is highly discouraged"
)
def pytest_configure(config):
config.addinivalue_line("markers", "f1telapi: test connects to the f1 telemetry api")
config.addinivalue_line("markers", "ergastapi: test connects to the ergast api")
config.addinivalue_line("markers", "prjdoc: general non-code tests for project and structure")
config.addinivalue_line("markers", "slow: extremely slow tests (multiple minutes)")
def pytest_collection_modifyitems(config, items):
# cli conditional skip extremely slow tests
if not config.getoption("--slow"):
skip_slow = pytest.mark.skip(reason="need --slow option to run; usage highly discouraged")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
# cli conditional skip test that connect to the f1 telemetry api
if not config.getoption("--f1-tel-api"):
skip_f1_tel = pytest.mark.skip(reason="need --f1-tel-api option to run")
for item in items:
if "f1telapi" in item.keywords:
item.add_marker(skip_f1_tel)
# cli conditional skip test that connect to the ergast api
if not config.getoption("--ergast-api"):
skip_ergast = pytest.mark.skip(reason="need --ergast-api option to run")
for item in items:
if "ergastapi" in item.keywords:
item.add_marker(skip_ergast)
# lint only: skip all
if config.getoption('--lint-only'):
items[:] = [item for item in items if item.get_closest_marker('flake8')]
# only test documentation and project structure
if config.getoption('--prj-doc'):
skip_non_prj = pytest.mark.skip(reason="--prj-doc given: run only project structure and documentation tests")
for item in items:
if "prjdoc" not in item.keywords:
item.add_marker(skip_non_prj)
else:
skip_prj = pytest.mark.skip(reason="need --prj-doc to run project structure and documentation tests")
for item in items:
if "prjdoc" in item.keywords:
item.add_marker(skip_prj)
@pytest.fixture
def reference_laps_data():
# provides a reference instance of session and laps to tests which
# require it
import fastf1
fastf1.Cache.enable_cache("test_cache/")
session = fastf1.get_session(2020, 'Italy', 'R')
laps = session.load_laps(with_telemetry=True)
return session, laps
```
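The options above gate the network-dependent tests. A hedged sketch of driving them programmatically, equivalent to passing the same flags on the pytest command line; the test path is an example, not necessarily the project's layout.
```python
# Illustrative only: run the suite with the custom flags registered above.
# Equivalent to `pytest --f1-tel-api --ergast-api fastf1/tests` on the command line.
import pytest

exit_code = pytest.main(["--f1-tel-api", "--ergast-api", "fastf1/tests"])
print("pytest finished with exit code", exit_code)
```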
#### File: fastf1/tests/test_laps.py
```python
import pandas as pd
import pytest
import fastf1
import pandas
import datetime
from fastf1.testing.reference_values import LAP_DTYPES, ensure_data_type
def test_constructor():
laps = fastf1.core.Laps({'example': (1, 2, 3, 4, 5, 6)})
sliced = laps.iloc[:2]
assert isinstance(sliced, fastf1.core.Laps)
def test_constructor_sliced():
laps = fastf1.core.Laps({'example': (1, 2, 3, 4, 5, 6)})
single = laps.iloc[:2].iloc[0]
assert isinstance(single, fastf1.core.Lap)
def test_base_class_view_laps():
laps = fastf1.core.Laps()
bcv = laps.base_class_view
assert isinstance(bcv, pandas.DataFrame)
@pytest.mark.f1telapi
def test_dtypes_from_api(reference_laps_data):
session, laps = reference_laps_data
ensure_data_type(LAP_DTYPES, laps)
@pytest.mark.f1telapi
def test_dtypes_pick(reference_laps_data):
session, laps = reference_laps_data
drv = list(laps['Driver'].unique())[1] # some driver
ensure_data_type(LAP_DTYPES, laps.pick_driver(drv))
ensure_data_type(LAP_DTYPES, laps.pick_quicklaps())
ensure_data_type(LAP_DTYPES, laps.iloc[:2])
ensure_data_type(LAP_DTYPES,
laps.pick_driver(drv).iloc[:3].pick_quicklaps())
@pytest.mark.f1telapi
def test_laps_get_car_data(reference_laps_data):
session, laps = reference_laps_data
drv = list(laps['Driver'].unique())[1] # some driver
drv_laps = laps.pick_driver(drv)
car = drv_laps.get_car_data()
assert car.shape == (26559, 10)
assert not car.isna().sum().sum() # sum rows then columns
for col in ('Speed', 'RPM', 'nGear', 'Throttle', 'Brake', 'DRS',
'Time', 'SessionTime', 'Date', 'Source'):
assert col in car.columns
@pytest.mark.f1telapi
def test_laps_get_pos_data(reference_laps_data):
session, laps = reference_laps_data
drv = list(laps['Driver'].unique())[1] # some driver
drv_laps = laps.pick_driver(drv)
pos = drv_laps.get_pos_data()
assert pos.shape == (29330, 8)
assert not pos.isna().sum().sum()
for col in ('X', 'Y', 'Z', 'Status', 'Time', 'SessionTime', 'Date',
'Source'):
assert col in pos.columns
@pytest.mark.f1telapi
def test_laps_get_telemetry(reference_laps_data):
session, laps = reference_laps_data
drv = list(laps['Driver'].unique())[1] # some driver
drv_laps = laps.pick_driver(drv)
tel = drv_laps.get_telemetry()
assert tel.shape == (55788, 18)
assert not tel.isna().sum().sum()
for col in ('Speed', 'RPM', 'nGear', 'Throttle', 'Brake', 'DRS',
'X', 'Y', 'Z', 'Status', 'Time', 'SessionTime', 'Date',
'Source', 'Distance', 'DriverAhead'):
assert col in tel.columns
@pytest.mark.f1telapi
def test_laps_get_weather_data(reference_laps_data):
session, laps = reference_laps_data
wd = laps.get_weather_data()
assert wd.shape == (924, 8)
for col in ('AirTemp', 'Humidity', 'Pressure', 'Rainfall',
'TrackTemp', 'WindDirection', 'WindSpeed', 'Time'):
assert col in wd.columns
# test that an empty laps object returns empty weather data
no_laps = fastf1.core.Laps()
no_laps.session = session
no_wd = no_laps.get_weather_data()
assert isinstance(no_wd, pd.DataFrame)
assert no_wd.empty
for col in ('AirTemp', 'Humidity', 'Pressure', 'Rainfall',
'TrackTemp', 'WindDirection', 'WindSpeed', 'Time'):
assert col in wd.columns
@pytest.mark.f1telapi
def test_lap_get_car_data(reference_laps_data):
session, laps = reference_laps_data
drv_laps = laps.pick_fastest()
car = drv_laps.get_car_data()
assert car.shape == (340, 10)
assert not car.isna().sum().sum() # sum rows then columns
for col in ('Speed', 'RPM', 'nGear', 'Throttle', 'Brake', 'DRS',
'Time', 'SessionTime', 'Date', 'Source'):
assert col in car.columns
@pytest.mark.f1telapi
def test_lap_get_pos_data(reference_laps_data):
session, laps = reference_laps_data
drv_laps = laps.pick_fastest()
pos = drv_laps.get_pos_data()
assert pos.shape == (377, 8)
assert not pos.isna().sum().sum()
for col in ('X', 'Y', 'Z', 'Status', 'Time', 'SessionTime', 'Date',
'Source'):
assert col in pos.columns
@pytest.mark.f1telapi
def test_lap_get_telemetry(reference_laps_data):
session, laps = reference_laps_data
drv_laps = laps.pick_fastest()
tel = drv_laps.get_telemetry()
assert tel.shape == (719, 18)
# DistanceToDriverAhead may contain nan values
assert not tel.loc[:, tel.columns != 'DistanceToDriverAhead']\
.isna().sum().sum()
for col in ('Speed', 'RPM', 'nGear', 'Throttle', 'Brake', 'DRS',
'X', 'Y', 'Z', 'Status', 'Time', 'SessionTime', 'Date',
'Source', 'Distance', 'DriverAhead'):
assert col in tel.columns
@pytest.mark.f1telapi
def test_lap_get_weather_data(reference_laps_data):
session, laps = reference_laps_data
# check a valid lap
fastest = laps.pick_fastest()
wd = fastest.get_weather_data()
assert wd.shape == (8, )
for col in ('AirTemp', 'Humidity', 'Pressure', 'Rainfall',
'TrackTemp', 'WindDirection', 'WindSpeed', 'Time'):
assert col in wd.index
# create a 'fake' lap for which no weather data exists
# should use last known value
lap = fastf1.core.Lap(index=fastest.index, dtype='object')
lap.session = session
lap['Time'] = datetime.timedelta(days=1/24*3)
lap['LapStartTime'] = lap['Time'] - datetime.timedelta(seconds=30)
wd_last = lap.get_weather_data()
pd.testing.assert_series_equal(wd_last, session.weather_data.iloc[-1])
```
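Outside the test suite, the same API is typically exercised the way the `reference_laps_data` fixture does; a sketch assuming a local cache directory and access to the F1 telemetry API.
```python
# Sketch mirroring the conftest fixture; cache directory and event are examples.
import fastf1

fastf1.Cache.enable_cache("test_cache/")
session = fastf1.get_session(2020, 'Italy', 'R')
laps = session.load_laps(with_telemetry=True)

fastest = laps.pick_fastest()
car = fastest.get_car_data()  # includes Speed, RPM, nGear, Throttle, Brake, DRS, ...
print(car[['Speed', 'Throttle', 'Brake']].head())
```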
#### File: fastf1/tests/test_livetiming.py
```python
import os
from fastf1.core import Session, Weekend
from fastf1.livetiming.data import LiveTimingData
def test_file_loading_w_errors():
# load file with many errors and invalid data without crashing
livedata = LiveTimingData('fastf1/testing/reference_data/livedata/with_errors.txt')
livedata.load()
def test_file_loading():
# load a valid file
livedata = LiveTimingData('fastf1/testing/reference_data/livedata/2021_1_FP3.txt')
livedata.load()
weekend = Weekend(2021, 1)
session = Session(weekend=weekend, session_name='test_session')
session.load_laps(with_telemetry=True, livedata=livedata)
assert session.laps.shape == (274, 26)
assert session.car_data['44'].shape == (17362, 10)
def test_duplicate_removal(tmpdir):
# create a temporary file with two identical lines of data
tmpfile = os.path.join(tmpdir, 'tmpfile.txt')
data = "['TimingAppData', {'Lines': {'22': {'Stints': {'0': {" \
"'LapFlags': 0, 'Compound': 'UNKNOWN', 'New': 'false'," \
"'TyresNotChanged': '0', 'TotalLaps': 0, 'StartLaps':" \
"0}}}}}, '2021-03-27T12:00:32.086Z']\n"
with open(tmpfile, 'w') as fobj:
fobj.write(data)
fobj.write(data)
livedata = LiveTimingData(tmpfile)
assert len(livedata.get('TimingAppData')) == 1
livedata = LiveTimingData(tmpfile, remove_duplicates=False)
assert len(livedata.get('TimingAppData')) == 2
```
#### File: fastf1/tests/test_plotting.py
```python
import pytest
from fastf1.plotting import TEAM_COLORS, TEAM_TRANSLATE
def test_team_colors_dict_warning():
with pytest.raises(KeyError):
with pytest.warns(UserWarning):
TEAM_COLORS['Ferrari']
with pytest.warns(UserWarning):
TEAM_COLORS.get('Ferrari', None)
TEAM_COLORS['ferrari']
TEAM_COLORS.get('ferrari', None)
def test_team_color_name_abbreviation_integrity():
for value in TEAM_TRANSLATE.values():
assert value in TEAM_COLORS
assert len(TEAM_COLORS) == len(TEAM_TRANSLATE)
```
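As the integrity test implies, TEAM_TRANSLATE maps abbreviations to the lowercase team names that key TEAM_COLORS; a small sketch listing them (the concrete entries depend on the installed fastf1 version).
```python
# Illustrative only: enumerate the abbreviation -> team name -> colour mapping.
from fastf1.plotting import TEAM_COLORS, TEAM_TRANSLATE

for abbrev, name in TEAM_TRANSLATE.items():
    print(f"{abbrev} -> {name}: {TEAM_COLORS[name]}")
```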
#### File: fastf1/tests/test_utils.py
```python
import datetime
from fastf1.utils import to_datetime, to_timedelta
def test_to_timedelta():
cases = [
('13:24:46.320215',
datetime.timedelta(hours=13, minutes=24,
seconds=46, microseconds=320215)),
('13:24:46.32',
datetime.timedelta(hours=13, minutes=24,
seconds=46, microseconds=320000)),
('13:24:46.',
datetime.timedelta(hours=13, minutes=24,
seconds=46, microseconds=0)),
('13:24:46', datetime.timedelta(hours=13, minutes=24, seconds=46)),
('24:46', datetime.timedelta(minutes=24, seconds=46)),
('4:46', datetime.timedelta(minutes=4, seconds=46)),
('46', datetime.timedelta(seconds=46)),
('4:46.5264', datetime.timedelta(minutes=4, seconds=46,
microseconds=526400)),
]
for ts, expected in cases:
assert to_timedelta(ts) == expected
def test_to_datetime():
cases = [
('2020-12-13T13:27:15.320653Z',
datetime.datetime(2020, 12, 13, 13, 27, 15, 320653)),
('2020-12-13T13:27:15.320000Z',
datetime.datetime(2020, 12, 13, 13, 27, 15, 320000)),
('2020-12-13T13:27:15.320000',
datetime.datetime(2020, 12, 13, 13, 27, 15, 320000)),
('2020-12-13T13:27:15.32Z',
datetime.datetime(2020, 12, 13, 13, 27, 15, 320000)),
('2020-12-13T13:27:15',
datetime.datetime(2020, 12, 13, 13, 27, 15, 0)),
('2020-12-13T13:27:15.',
datetime.datetime(2020, 12, 13, 13, 27, 15, 0)),
(datetime.datetime(2020, 12, 13, 13, 27, 15, 0),
datetime.datetime(2020, 12, 13, 13, 27, 15, 0))
]
for ts, expected in cases:
assert to_datetime(ts) == expected
```
|
{
"source": "JellyBeanXiewh/django-vue-e_commerce",
"score": 2
}
|
#### File: back_end/shop/models.py
```python
from django.db import models
from django.utils.encoding import smart_str
class Account(models.Model):
username = models.CharField('用户名', primary_key=True, max_length=20)
password = models.CharField('密码', max_length=16)
class Meta:
verbose_name = verbose_name_plural = '账户'
db_table = 'Account'
def __str__(self):
return smart_str(self.username)
class Address(models.Model):
addr_id = models.AutoField('地址ID', primary_key=True, auto_created=True)
username = models.ForeignKey('UserInfo', models.CASCADE, db_column='username', verbose_name='账户')
receiver = models.CharField('收件人', max_length=10)
address = models.CharField('收货地址', max_length=50)
phone_number = models.CharField('联系电话', max_length=15)
class Meta:
verbose_name = verbose_name_plural = '收货地址'
db_table = 'Address'
class Cart(models.Model):
username = models.ForeignKey('UserInfo', models.CASCADE, db_column='username', verbose_name='账户')
item_id = models.ForeignKey('Item', models.CASCADE, db_column='item_id', verbose_name='商品ID')
amount = models.IntegerField('数量')
update_time = models.DateTimeField('更新时间', auto_now_add=True)
class Meta:
verbose_name = verbose_name_plural = '购物车'
db_table = 'Cart'
unique_together = (('item_id', 'username'),)
class Item(models.Model):
item_id = models.AutoField('商品ID', primary_key=True, auto_created=True)
item_name = models.CharField('商品名称', max_length=20)
price = models.DecimalField('价格', max_digits=10, decimal_places=2)
typeid = models.ForeignKey('ItemType', models.PROTECT, db_column='typeid', verbose_name='商品类型')
description = models.TextField('商品描述', blank=True, null=True)
class Meta:
verbose_name = verbose_name_plural = '商品信息'
db_table = 'Item'
def __str__(self):
return smart_str('{}: {}'.format(self.item_id, self.item_name))
class ItemType(models.Model):
typeid = models.AutoField('商品类型ID', primary_key=True, auto_created=True)
typename = models.CharField('类型名称', max_length=20)
class Meta:
verbose_name = verbose_name_plural = '商品类型'
db_table = 'ItemType'
def __str__(self):
return smart_str('{}({})'.format(self.typename, self.typeid))
class Order(models.Model):
order_id = models.AutoField('订单号', primary_key=True, auto_created=True)
username = models.ForeignKey('UserInfo', models.DO_NOTHING, db_column='username', verbose_name='账户')
create_time = models.DateTimeField('创建时间', auto_now_add=True)
payment_time = models.DateTimeField('支付时间', blank=True, null=True)
ship_time = models.DateTimeField('发货时间', blank=True, null=True)
finish_time = models.DateTimeField('完成时间', blank=True, null=True)
summary_price = models.DecimalField('总金额', max_digits=10, decimal_places=2, default=0)
status = models.IntegerField('订单状态',
choices=[
(0, '待支付'),
(1, '待发货'),
(2, '已发货'),
(3, '已完成'),
(4, '已取消'),
(5, '已退款'),
(6, '已退货')
],
default=0)
delivery_company = models.CharField('物流公司', max_length=8, blank=True, null=True)
delivery_id = models.CharField('快递单号', max_length=20, blank=True, null=True)
class Meta:
verbose_name = verbose_name_plural = '订单'
db_table = 'Order'
def __str__(self):
return smart_str(self.order_id)
class OrderDetails(models.Model):
order_id = models.ForeignKey('Order', models.CASCADE, db_column='order_id', verbose_name='订单号')
item_id = models.CharField('商品ID', max_length=8)
item_name = models.CharField('商品名称', max_length=20)
price = models.DecimalField('购买单价', max_digits=10, decimal_places=2)
amount = models.IntegerField('数量')
product_review = models.OneToOneField('ProductReview', models.SET_NULL, db_column='pr_id', blank=True, null=True, verbose_name='评价ID')
class Meta:
verbose_name = verbose_name_plural = '订单详情'
db_table = 'OrderDetails'
unique_together = (('order_id', 'item_id'),)
class ProductReview(models.Model):
review_id = models.AutoField('评价ID', primary_key=True, auto_created=True)
order_id = models.OneToOneField(OrderDetails, models.CASCADE, db_column='order_id', verbose_name='订单ID')
review_property = models.IntegerField('评价属性',
default=3,
choices=[
(0, '差评'),
(1, '中评'),
(2, '好评')
],)
content = models.TextField('评价内容', blank=True, null=True)
class Meta:
verbose_name = verbose_name_plural = '评价'
db_table = 'ProductReview'
class StockInfo(models.Model):
item = models.OneToOneField(Item, models.CASCADE, db_column='item_id', primary_key=True, verbose_name='商品ID')
inventory = models.IntegerField('库存量')
class Meta:
verbose_name = verbose_name_plural = '库存信息'
db_table = 'StockInfo'
def __str__(self):
return smart_str(self.item)
class UserInfo(models.Model):
username = models.OneToOneField(Account, models.CASCADE, db_column='username', primary_key=True, verbose_name='账户')
name = models.CharField('昵称', max_length=10, blank=True, null=True)
sex = models.BooleanField('性别', choices=[(True, '男'), (False, '女')])
phone = models.CharField('绑定手机', max_length=15, blank=True, null=True)
register_date = models.DateField('注册日期', blank=True, null=True, auto_now_add=True)
class Meta:
verbose_name = verbose_name_plural = '用户信息'
db_table = 'UserInfo'
def __str__(self):
return smart_str('{}({})'.format(self.name, self.username))
```
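A hedged ORM sketch for the models above, meant to be run from `manage.py shell` after migrations; the import path follows the app layout shown in the file header, and all literal values are examples only.
```python
# Illustrative only: create a user, an item with stock, and a cart entry.
from decimal import Decimal
from shop.models import Account, UserInfo, ItemType, Item, StockInfo, Cart

account = Account.objects.create(username='alice', password='example-pass')
user = UserInfo.objects.create(username=account, name='Alice', sex=False)

drinks = ItemType.objects.create(typename='Drinks')
cola = Item.objects.create(item_name='Cola', price=Decimal('3.50'), typeid=drinks)
StockInfo.objects.create(item=cola, inventory=100)

Cart.objects.create(username=user, item_id=cola, amount=2)
print(Cart.objects.filter(username=user).count())  # 1
```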
|
{
"source": "JellyBrick/music_transcription_MAPS",
"score": 2
}
|
#### File: JellyBrick/music_transcription_MAPS/prepare_data.py
```python
import numpy as np
import argparse
from scipy import signal
from midiutil.MidiFile import MIDIFile
import matplotlib.pyplot as plt
import soundfile
import librosa
import csv
import time
import h5py
import pickle
import _pickle as cPickle
import os
from sklearn import preprocessing
import config as cfg
def create_folder(fd):
if not os.path.exists(fd):
os.makedirs(fd)
def get_filename(path):
path = os.path.realpath(path)
    na_ext = os.path.basename(path)  # robust to both '/' and '\\' separators
na = os.path.splitext(na_ext)[0]
return na
### Audio & feature related.
def read_audio(path, target_fs=None):
"""Read 1 dimension audio sequence from given path.
Args:
path: string, path of audio.
target_fs: int, resampling rate.
Returns:
audio: 1 dimension audio sequence.
fs: sampling rate of audio.
"""
(audio, fs) = soundfile.read(path)
if audio.ndim > 1:
audio = np.mean(audio, axis=1)
if target_fs is not None and fs != target_fs:
audio = librosa.resample(audio, orig_sr=fs, target_sr=target_fs)
fs = target_fs
return audio, fs
def write_audio(path, audio, sample_rate):
"""Write audio sequence to .wav file.
Args:
path: string, path to write out .wav file.
data: ndarray, audio sequence to write out.
sample_rate: int, sample rate to write out.
Returns:
None.
"""
soundfile.write(file=path, data=audio, samplerate=sample_rate)
def spectrogram(audio):
"""Calculate magnitude spectrogram of an audio sequence.
Args:
audio: 1darray, audio sequence.
Returns:
x: ndarray, spectrogram (n_time, n_freq)
"""
n_window = cfg.n_window
n_overlap = cfg.n_overlap
ham_win = np.hamming(n_window)
    [f, t, x] = signal.spectrogram(
audio,
window=ham_win,
nperseg=n_window,
noverlap=n_overlap,
detrend=False,
return_onesided=True,
mode='magnitude')
x = x.T
x = x.astype(np.float32)
return x
def logmel(audio):
"""Calculate log Mel spectrogram of an audio sequence.
Args:
audio: 1darray, audio sequence.
Returns:
x: ndarray, log Mel spectrogram (n_time, n_freq)
"""
n_window = cfg.n_window
n_overlap = cfg.n_overlap
fs = cfg.sample_rate
ham_win = np.hamming(n_window)
    [f, t, x] = signal.spectrogram(
audio,
window=ham_win,
nperseg=n_window,
noverlap=n_overlap,
detrend=False,
return_onesided=True,
mode='magnitude')
x = x.T
if globals().get('melW') is None:
global melW
melW = librosa.filters.mel(sr=fs,
n_fft=n_window,
n_mels=229,
fmin=0,
fmax=fs / 2.)
x = np.dot(x, melW.T)
x = np.log(x + 1e-8)
x = x.astype(np.float32)
return x
def calculate_features(args):
"""Calculate and write out features & ground truth notes of all songs in MUS
directory of all pianos.
"""
dataset_dir = args.dataset_dir
workspace = args.workspace
feat_type = args.feat_type
fs = cfg.sample_rate
tr_pianos = cfg.tr_pianos
te_pianos = cfg.te_pianos
pitch_bgn = cfg.pitch_bgn
pitch_fin = cfg.pitch_fin
out_dir = os.path.join(workspace, "features", feat_type)
create_folder(out_dir)
# Calculate features for all 9 pianos.
cnt = 0
for piano in tr_pianos + te_pianos:
audio_dir = os.path.join(dataset_dir, piano, "MUS")
wav_names = [na for na in os.listdir(audio_dir) if na.endswith('.wav')]
for wav_na in wav_names:
# Read audio.
bare_na = os.path.splitext(wav_na)[0]
wav_path = os.path.join(audio_dir, wav_na)
(audio, _) = read_audio(wav_path, target_fs=fs)
# Calculate feature.
if feat_type == "spectrogram":
x = spectrogram(audio)
elif feat_type == "logmel":
x = logmel(audio)
else:
raise Exception("Error!")
# Read piano roll from txt file.
(n_time, n_freq) = x.shape
txt_path = os.path.join(audio_dir, "{0}.txt".format(bare_na))
print(txt_path)
roll = txt_to_midi_roll(txt_path, max_fr_len=n_time) # (n_time, 128)
y = roll[:, pitch_bgn : pitch_fin] # (n_time, 88)
# Write out data.
data = [x, y]
out_path = os.path.join(out_dir, "{0}.p".format(bare_na))
print((cnt, out_path, x.shape, y.shape))
cPickle.dump(data, open(out_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
cnt += 1
### Pack features.
def is_in_pianos(na, list_of_piano):
"""E.g., na="MAPS_MUS-alb_esp2_SptkBGCl.wav", list_of_piano=['SptkBGCl', ...]
then return True.
"""
for piano in list_of_piano:
if piano in na:
return True
return False
def pack_features(args):
"""Pack already calculated features and write out to a big file, for
speeding up later loading.
"""
workspace = args.workspace
feat_type = args.feat_type
tr_pianos = cfg.tr_pianos
te_pianos = cfg.te_pianos
fe_dir = os.path.join(workspace, "features", feat_type)
fe_names = os.listdir(fe_dir)
# Load all single feature files and append to list.
tr_x_list, tr_y_list, tr_na_list = [], [], []
te_x_list, te_y_list, te_na_list = [], [], []
t1 = time.time()
cnt = 0
for fe_na in fe_names:
print(cnt)
bare_na = os.path.splitext(fe_na)[0]
fe_path = os.path.join(fe_dir, fe_na)
[x, y] = cPickle.load(open(fe_path, 'rb'))
if is_in_pianos(fe_na, tr_pianos):
tr_x_list.append(x)
tr_y_list.append(y)
tr_na_list.append("%s.wav" % bare_na)
elif is_in_pianos(fe_na, te_pianos):
te_x_list.append(x)
te_y_list.append(y)
te_na_list.append("%s.wav" % bare_na)
else:
raise Exception("File not in tr_pianos or te_pianos!")
cnt += 1
# Write out the big file.
out_dir = os.path.join(workspace, "packed_features", feat_type)
create_folder(out_dir)
tr_packed_feat_path = os.path.join(out_dir, "train.p")
te_packed_feat_path = os.path.join(out_dir, "test.p")
cPickle.dump([tr_x_list, tr_y_list, tr_na_list], open(tr_packed_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
cPickle.dump([te_x_list, te_y_list, te_na_list], open(te_packed_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
print(("Packing time: %s s" % (time.time() - t1,)))
### Scaler related.
def compute_scaler(args):
"""Compute and write out scaler from already packed feature file. Using
scaler in training neural network can speed up training.
"""
workspace = args.workspace
feat_type = args.feat_type
# Load packed features.
t1 = time.time()
packed_feat_path = os.path.join(workspace, "packed_features", feat_type, "train.p")
[x_list, _, _] = cPickle.load(open(packed_feat_path, 'rb'))
# Compute scaler.
x_all = np.concatenate(x_list)
scaler = preprocessing.StandardScaler(with_mean=True, with_std=True).fit(x_all)
print((scaler.mean_))
print((scaler.scale_))
# Save out scaler.
out_path = os.path.join(workspace, "scalers", feat_type, "scaler.p")
create_folder(os.path.dirname(out_path))
pickle.dump(scaler, open(out_path, 'wb'))
print(("Compute scaler finished! %s s" % (time.time() - t1,)))
def scale_on_x_list(x_list, scaler):
"""Scale list of ndarray.
"""
return [scaler.transform(e) for e in x_list]
### Data pre-processing.
def data_to_3d(x_list, y_list, n_concat, n_hop):
"""Convert data to 3d tensor.
Args:
x_list: list of ndarray, e.g., [(N1, n_freq), (N2, n_freq), ...]
y_list: list of ndarray, e.g., [(N1, 88), (N2, 88), ...]
n_concat: int, number of frames to concatenate.
n_hop: int, hop frames.
Returns:
x_all: (n_samples, n_concat, n_freq)
y_all: (n_samples, n_out)
"""
x_all, y_all = [], []
n_half = (n_concat - 1) // 2
for e in x_list:
x3d = mat_2d_to_3d(e, n_concat, n_hop)
x_all.append(x3d)
for e in y_list:
y3d = mat_2d_to_3d(e, n_concat, n_hop)
y_all.append(y3d)
x_all = np.concatenate(x_all, axis=0) # (n_samples, n_concat, n_freq)
y_all = np.concatenate(y_all, axis=0) # (n_samples, n_concat, n_out)
y_all = y_all[:, n_half, :] # (n_samples, n_out)
return x_all, y_all
def mat_2d_to_3d(x, agg_num, hop):
"""Convert data to 3d tensor.
Args:
x: 2darray, e.g., (N, n_in)
agg_num: int, number of frames to concatenate.
hop: int, hop frames.
Returns:
x3d: 3darray, e.g., (n_samples, agg_num, n_in)
"""
# pad to at least one block
len_x, n_in = x.shape
if (len_x < agg_num):
x = np.concatenate((x, np.zeros((agg_num-len_x, n_in))))
# agg 2d to 3d
len_x = len(x)
i1 = 0
x3d = []
while (i1+agg_num <= len_x):
x3d.append(x[i1:i1+agg_num])
i1 += hop
x3d = np.array(x3d)
return x3d
### I/O.
def txt_to_midi_roll(txt_path, max_fr_len):
"""Read txt to piano roll.
Args:
txt_path: string, path of note info txt.
max_fr_len: int, should be the same as the number of frames of calculated
feature.
Returns:
        midi_roll: (n_time, 128)
"""
step_sec = cfg.step_sec
with open(txt_path, 'rt') as f:
reader = csv.reader(f, delimiter='\t')
lis = list(reader)
midi_roll = np.zeros((max_fr_len, 128))
for i1 in range(1, len(lis)):
# Read a note info from a line.
try:
[onset_time, offset_time, midi_pitch] = lis[i1]
onset_time = float(onset_time)
offset_time = float(offset_time)
midi_pitch = int(midi_pitch)
# Write a note info to midi roll.
onset_fr = int(np.floor(onset_time / step_sec))
offset_fr = int(np.ceil(offset_time / step_sec)) + 1
midi_roll[onset_fr : offset_fr, midi_pitch] = 1
except ValueError:
continue
return midi_roll
def prob_to_midi_roll(x, thres):
"""Threshold input probability to binary, then convert piano roll (n_time, 88)
to midi roll (n_time, 108).
Args:
x: (n_time, n_pitch)
"""
pitch_bgn = cfg.pitch_bgn
x_bin = np.zeros_like(x)
x_bin[np.where(x >= thres)] = 1
n_time = x.shape[0]
out = np.zeros((n_time, 128))
out[:, pitch_bgn : pitch_bgn + 88] = x_bin
return out
def write_midi_roll_to_midi(x, out_path):
"""Write out midi_roll to midi file.
Args:
x: (n_time, n_pitch), midi roll.
out_path: string, path to write out the midi.
"""
step_sec = cfg.step_sec
def _get_bgn_fin_pairs(ary):
pairs = []
bgn_fr, fin_fr = -1, -1
for i2 in range(1, len(ary)):
if ary[i2-1] == 0 and ary[i2] == 0:
pass
elif ary[i2-1] == 0 and ary[i2] == 1:
bgn_fr = i2
elif ary[i2-1] == 1 and ary[i2] == 0:
fin_fr = i2
if fin_fr > bgn_fr:
pairs.append((bgn_fr, fin_fr))
elif ary[i2-1] == 1 and ary[i2] == 1:
pass
else:
raise Exception("Input must be binary matrix!")
return pairs
# Get (pitch, bgn_frame, fin_frame) triple.
triples = []
(n_time, n_pitch) = x.shape
for i1 in range(n_pitch):
ary = x[:, i1]
pairs_per_pitch = _get_bgn_fin_pairs(ary)
if pairs_per_pitch:
triples_per_pitch = [(i1,) + pair for pair in pairs_per_pitch]
triples += triples_per_pitch
# Sort by begin frame.
triples = sorted(triples, key=lambda x: x[1])
# Write out midi.
MyMIDI = MIDIFile(1) # Create the MIDIFile Object with 1 track
track = 0
time = 0
tempo = 120
beat_per_sec = 60. / float(tempo)
MyMIDI.addTrackName(track, time, "Sample Track") # Add track name
MyMIDI.addTempo(track, time, tempo) # Add track tempo
for triple in triples:
(midi_pitch, bgn_fr, fin_fr) = triple
bgn_beat = bgn_fr * step_sec / float(beat_per_sec)
fin_beat = fin_fr * step_sec / float(beat_per_sec)
dur_beat = fin_beat - bgn_beat
MyMIDI.addNote(track=0, # The track to which the note is added.
channel=0, # the MIDI channel to assign to the note. [Integer, 0-15]
pitch=midi_pitch, # the MIDI pitch number [Integer, 0-127].
time=bgn_beat, # the time (in beats) at which the note sounds [Float].
duration=dur_beat, # the duration of the note (in beats) [Float].
volume=100) # the volume (velocity) of the note. [Integer, 0-127].
out_file = open(out_path, 'wb')
MyMIDI.writeFile(out_file)
out_file.close()
### Evaluation.
def tp_fn_fp_tn(p_y_pred, y_gt, thres, average):
"""
Args:
p_y_pred: shape = (n_samples,) or (n_samples, n_classes)
y_gt: shape = (n_samples,) or (n_samples, n_classes)
thres: float between 0 and 1.
average: None (element wise) | 'micro' (calculate metrics globally)
| 'macro' (calculate metrics for each label then average).
Returns:
tp, fn, fp, tn or list of tp, fn, fp, tn.
"""
if p_y_pred.ndim == 1:
y_pred = np.zeros_like(p_y_pred)
y_pred[np.where(p_y_pred > thres)] = 1.
tp = np.sum(y_pred + y_gt > 1.5)
fn = np.sum(y_gt - y_pred > 0.5)
fp = np.sum(y_pred - y_gt > 0.5)
tn = np.sum(y_pred + y_gt < 0.5)
return tp, fn, fp, tn
elif p_y_pred.ndim == 2:
tps, fns, fps, tns = [], [], [], []
n_classes = p_y_pred.shape[1]
for j1 in range(n_classes):
(tp, fn, fp, tn) = tp_fn_fp_tn(p_y_pred[:, j1], y_gt[:, j1], thres, None)
tps.append(tp)
fns.append(fn)
fps.append(fp)
tns.append(tn)
if average is None:
return tps, fns, fps, tns
elif average == 'micro' or average == 'macro':
return np.sum(tps), np.sum(fns), np.sum(fps), np.sum(tns)
else:
raise Exception("Incorrect average arg!")
else:
raise Exception("Incorrect dimension!")
def prec_recall_fvalue(p_y_pred, y_gt, thres, average):
"""
Args:
p_y_pred: shape = (n_samples,) or (n_samples, n_classes)
y_gt: shape = (n_samples,) or (n_samples, n_classes)
thres: float between 0 and 1.
average: None (element wise) | 'micro' (calculate metrics globally)
| 'macro' (calculate metrics for each label then average).
Returns:
prec, recall, fvalue | list or prec, recall, fvalue.
"""
eps = 1e-10
if p_y_pred.ndim == 1:
(tp, fn, fp, tn) = tp_fn_fp_tn(p_y_pred, y_gt, thres, average=None)
prec = tp / max(float(tp + fp), eps)
recall = tp / max(float(tp + fn), eps)
fvalue = 2 * (prec * recall) / max(float(prec + recall), eps)
return prec, recall, fvalue
elif p_y_pred.ndim == 2:
n_classes = p_y_pred.shape[1]
if average is None or average == 'macro':
precs, recalls, fvalues = [], [], []
for j1 in range(n_classes):
(prec, recall, fvalue) = prec_recall_fvalue(p_y_pred[:, j1], y_gt[:, j1], thres, average=None)
precs.append(prec)
recalls.append(recall)
fvalues.append(fvalue)
if average is None:
return precs, recalls, fvalues
elif average == 'macro':
return np.mean(precs), np.mean(recalls), np.mean(fvalues)
elif average == 'micro':
(prec, recall, fvalue) = prec_recall_fvalue(p_y_pred.flatten(), y_gt.flatten(), thres, average=None)
return prec, recall, fvalue
else:
raise Exception("Incorrect average arg!")
else:
raise Exception("Incorrect dimension!")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
subparsers = parser.add_subparsers(dest='mode')
parser_a = subparsers.add_parser('calculate_features')
parser_a.add_argument('--dataset_dir', type=str)
parser_a.add_argument('--workspace', type=str)
parser_a.add_argument('--feat_type', type=str, choices=['logmel'])
parser_pack_features = subparsers.add_parser('pack_features')
parser_pack_features.add_argument('--workspace', type=str)
parser_pack_features.add_argument('--feat_type', type=str, choices=['logmel'])
parser_compute_scaler = subparsers.add_parser('compute_scaler')
parser_compute_scaler.add_argument('--workspace', type=str)
parser_compute_scaler.add_argument('--feat_type', type=str, choices=['logmel'])
args = parser.parse_args()
if args.mode == 'calculate_features':
calculate_features(args)
elif args.mode == 'pack_features':
pack_features(args)
elif args.mode == 'compute_scaler':
compute_scaler(args)
else:
raise Exception("Incorrect argument!")
```
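To make the framing logic concrete, a small self-contained sketch of `mat_2d_to_3d` and `data_to_3d` on random arrays; the shapes and the `n_concat=7` window are example values, not the project's configuration.
```python
# Illustrative only: frame a feature matrix into overlapping context windows.
import numpy as np
from prepare_data import mat_2d_to_3d, data_to_3d

x = np.random.rand(100, 229).astype(np.float32)          # e.g. a log-mel spectrogram
y = (np.random.rand(100, 88) > 0.9).astype(np.float32)   # binary piano roll

x3d = mat_2d_to_3d(x, agg_num=7, hop=1)
print(x3d.shape)                   # (94, 7, 229): one 7-frame window per hop

x_all, y_all = data_to_3d([x], [y], n_concat=7, n_hop=1)
print(x_all.shape, y_all.shape)    # (94, 7, 229) and (94, 88): centre-frame targets
```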
|
{
"source": "jelly/chrome-remote-debugger-lib",
"score": 3
}
|
#### File: chrome-remote-debugger-lib/chrome_remote_lib/__init__.py
```python
import json
import sys
import requests
import websocket
class ChromeDebuggerConnectionError(Exception):
pass
class ChromeShell(object):
def __init__(self, host='localhost', port=9222):
self.host = host
self.port = port
self.url = 'http://{0}:{1}/json/'.format(self.host, self.port)
def tabs(self, title=''):
try:
tabs = json.loads(requests.get(self.url + 'list').text)
except requests.ConnectionError:
raise ChromeDebuggerConnectionError('Unable to connect to Chrome debugger on {0}'.format(self.url))
return [ChromeTab(tab, self) for tab in tabs if title in tab['title']]
def create_tab(self, url = None):
if url:
url = self.url + 'new?' + url
else:
url = self.url + 'new'
req = requests.get(url)
return ChromeTab(json.loads(req.text), shell=self)
def __repr__(self):
return 'ChromiumShell(host={0}, port={1})'.format(self.host, self.port)
class ChromeTab(object):
def __init__(self, data, shell):
self.shell = shell
self.data = data
self.cmd_id = 1
self._ws = None # Lazy load websocket url
@property
def id(self):
return self.data['id']
@property
def title(self):
return self.data['title']
@property
def ws(self):
# XXX: where do we close the websocket?
if self._ws is None:
self._ws = websocket.create_connection(self.data['webSocketDebuggerUrl'])
return self._ws
def close(self):
req = requests.get(self.shell.url + 'close/' + self.id)
# XXX: or raise exception?
if 'Could not close' in req.text:
return False
else:
return True
def reload(self, cache=True, script=''):
# XXX: check if script is sane?
payload = { 'id': self.cmd_id, 'method': 'Page.reload', 'params': {'ignoreCache': cache, 'scriptToEvaluateOnLoad': script}}
self.ws.send(json.dumps(payload)) # XXX: exceptions
self.cmd_id += 1
data = json.loads(self.ws.recv())
# FIXME: more error handling?
if 'errors' in data:
return False
else:
return True
def navigate(self, url):
# XXX: wrapper for generating payload + request
payload = { 'id': self.cmd_id, 'method': 'Page.navigate', 'params': {'url': url}}
self.ws.send(json.dumps(payload)) # XXX: exceptions
self.cmd_id += 1
data = json.loads(self.ws.recv())
# FIXME: resolve to new tab instance.
print(data)
# XXX: update tab
def __repr__(self):
if sys.version_info > (3, 0):
return u'ChromiumTab({0})'.format(self.data['title'])
else:
return unicode(self).encode(sys.stdout.encoding or 'utf8')
def __unicode__(self):
# XXX: empty title
return u'ChromiumTab({0})'.format(self.data['title'])
```
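A hedged usage sketch for the classes above; it assumes a Chrome/Chromium instance already running with `--remote-debugging-port=9222`.
```python
# Illustrative only: requires a browser started with --remote-debugging-port=9222.
from chrome_remote_lib import ChromeShell

shell = ChromeShell(host='localhost', port=9222)

tab = shell.create_tab('https://example.org')
print(tab.title, tab.id)

tab.reload(cache=False)              # force-reload over the debugger websocket
for t in shell.tabs(title='Example'):
    print(t)

tab.close()
```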
|
{
"source": "Jellycream/Cold-Calling",
"score": 3
}
|
#### File: Jellycream/Cold-Calling/dialTest.py
```python
import RPi.GPIO as GPIO
import math, sys, os
import subprocess
import socket
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)
c=0
last = 1
def count(pin):
global c
c = c + 1
GPIO.add_event_detect(18, GPIO.BOTH)
while True:
try:
if GPIO.event_detected(18):
current = GPIO.input(18)
if(last != current):
if(current == 0):
GPIO.add_event_detect(23, GPIO.BOTH, callback=count, bouncetime=10)
else:
GPIO.remove_event_detect(23)
                    number = int((c - 1) / 2)
                    print("You dialed", number)
c= 0
last = GPIO.input(18)
except KeyboardInterrupt:
break
```
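A hardware-free sketch of the pulse-count arithmetic used above (`number = int((c-1)/2)`); the edge counts below are made-up examples.
```python
# Illustrative only - no GPIO required: map raw edge counts to dialled digits.
def edges_to_digit(edge_count):
    return int((edge_count - 1) / 2)

for edges in (3, 9, 21):                       # example edge counts
    print(edges, "->", edges_to_digit(edges))  # 1, 4, 10 (a rotary '0' sends ten pulses)
```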
|
{
"source": "jellycsc/building-autoencoders-in-Pytorch",
"score": 3
}
|
#### File: jellycsc/building-autoencoders-in-Pytorch/main.py
```python
import numpy as np
# Torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# Torchvision
import torchvision
import torchvision.transforms as transforms
# Matplotlib
# (use `%matplotlib inline` when running inside a Jupyter notebook; it is not valid syntax in a plain .py script)
import matplotlib.pyplot as plt
# OS
import os
import argparse
# Set random seed for reproducibility
SEED = 87
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
torch.cuda.manual_seed(SEED)
def print_model(encoder, decoder):
print("============== Encoder ==============")
print(encoder)
print("============== Decoder ==============")
print(decoder)
print("")
def create_model():
autoencoder = Autoencoder()
print_model(autoencoder.encoder, autoencoder.decoder)
if torch.cuda.is_available():
autoencoder = autoencoder.cuda()
print("Model moved to GPU in order to speed up training.")
return autoencoder
def get_torch_vars(x):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
def imshow(img):
npimg = img.cpu().numpy()
plt.axis('off')
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
class Autoencoder(nn.Module):
def __init__(self):
super(Autoencoder, self).__init__()
# Input size: [batch, 3, 32, 32]
# Output size: [batch, 3, 32, 32]
self.encoder = nn.Sequential(
nn.Conv2d(3, 12, 4, stride=2, padding=1), # [batch, 12, 16, 16]
nn.ReLU(),
nn.Conv2d(12, 24, 4, stride=2, padding=1), # [batch, 24, 8, 8]
nn.ReLU(),
nn.Conv2d(24, 48, 4, stride=2, padding=1), # [batch, 48, 4, 4]
nn.ReLU(),
# nn.Conv2d(48, 96, 4, stride=2, padding=1), # [batch, 96, 2, 2]
# nn.ReLU(),
)
self.decoder = nn.Sequential(
# nn.ConvTranspose2d(96, 48, 4, stride=2, padding=1), # [batch, 48, 4, 4]
# nn.ReLU(),
nn.ConvTranspose2d(48, 24, 4, stride=2, padding=1), # [batch, 24, 8, 8]
nn.ReLU(),
nn.ConvTranspose2d(24, 12, 4, stride=2, padding=1), # [batch, 12, 16, 16]
nn.ReLU(),
nn.ConvTranspose2d(12, 3, 4, stride=2, padding=1), # [batch, 3, 32, 32]
nn.Sigmoid(),
)
def forward(self, x):
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return encoded, decoded
def main():
parser = argparse.ArgumentParser(description="Train Autoencoder")
parser.add_argument("--valid", action="store_true", default=False,
help="Perform validation only.")
args = parser.parse_args()
# Create model
autoencoder = create_model()
# Load data
transform = transforms.Compose(
[transforms.ToTensor(), ])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=16,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=16,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
if args.valid:
print("Loading checkpoint...")
autoencoder.load_state_dict(torch.load("./weights/autoencoder.pkl"))
dataiter = iter(testloader)
        images, labels = next(dataiter)  # builtin next() works across PyTorch versions
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(16)))
imshow(torchvision.utils.make_grid(images))
        images = get_torch_vars(images)  # moves to GPU only when CUDA is available
decoded_imgs = autoencoder(images)[1]
imshow(torchvision.utils.make_grid(decoded_imgs.data))
exit(0)
# Define an optimizer and criterion
criterion = nn.BCELoss()
optimizer = optim.Adam(autoencoder.parameters())
for epoch in range(100):
running_loss = 0.0
for i, (inputs, _) in enumerate(trainloader, 0):
inputs = get_torch_vars(inputs)
# ============ Forward ============
encoded, outputs = autoencoder(inputs)
loss = criterion(outputs, inputs)
# ============ Backward ============
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ============ Logging ============
running_loss += loss.data
if i % 2000 == 1999:
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
print('Saving Model...')
if not os.path.exists('./weights'):
os.mkdir('./weights')
torch.save(autoencoder.state_dict(), "./weights/autoencoder.pkl")
if __name__ == '__main__':
main()
```
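A quick shape check for the convolutional autoencoder above; the batch is random data, so it only demonstrates the tensor dimensions annotated in the model, not reconstruction quality.
```python
# Illustrative only: push a random CIFAR-sized batch through the model.
import torch
from main import Autoencoder  # training above runs only under __main__, so the import is safe

model = Autoencoder()
batch = torch.randn(16, 3, 32, 32)   # [batch, channels, height, width]

encoded, decoded = model(batch)
print(encoded.shape)  # torch.Size([16, 48, 4, 4]) - compressed representation
print(decoded.shape)  # torch.Size([16, 3, 32, 32]) - reconstruction
```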
|
{
"source": "jellycsc/cracking-the-coding-interview-practice",
"score": 4
}
|
#### File: cracking-the-coding-interview-practice/ctci_utils/linkedlist.py
```python
class LinkedListNode:
# Standard singly linkedlist node
def __init__(self, val):
self.val = val
self.next = None
def __str__(self):
return str(self.val)
def print_forward(self):
# No need for extra space
# Space complexity: O(1)
cur = self
while cur is not None:
print(str(cur.val) + " -> ", end='')
cur = cur.next
print("None")
def print_backward(self):
# Space complexity: O(n) because of recursion
if self.next is None:
print(self.val)
return
self.next.print_backward()
print(self.val)
def add_to_front(self, node):
node.next = self
return node
def length(self):
length = 0
cur = self
while cur is not None:
length += 1
cur = cur.next
return length
class LinkedList:
# Wrapper for LinkedListNode
def __init__(self):
print("Not friendly for recursion. Be careful!")
self.head = None
def __str__(self):
cur = self.head
val_list = []
while cur is not None:
val_list.append(str(cur.val))
cur = cur.next
return 'None' if len(val_list) == 0 else ' -> '.join(val_list) + ' -> None'
def add_to_front(self, node):
node.next = self.head
self.head = node
```
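A short usage sketch for the node class above (the LinkedList wrapper prints a warning on construction, so the node-level API is shown).
```python
# Illustrative only: build 1 -> 2 -> 3 -> None with the node-level API.
from ctci_utils.linkedlist import LinkedListNode

head = LinkedListNode(3)
head = head.add_to_front(LinkedListNode(2))
head = head.add_to_front(LinkedListNode(1))

head.print_forward()   # 1 -> 2 -> 3 -> None
head.print_backward()  # 3, 2, 1 on separate lines
print(head.length())   # 3
```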
|
{
"source": "jellycsc/Tic-Tac-Toe",
"score": 4
}
|
#### File: jellycsc/Tic-Tac-Toe/tictactoe_program.py
```python
import tictactoe_functions
import random
MAX_BOARD_SIZE = 9
SYMBOLS = 'OX'
COMPUTER = 0
HUMAN = 1
def is_valid_response(response, min_value, max_value):
""" (str, number, number) -> bool
Return True if and only if the str response contains the representation
of an int value without a +/- sign that is between min_value and max_value,
inclusive.
>>> is_valid_response('4', 1, 9)
True
>>> is_valid_response('abc', 1, 3.14159)
False
"""
return (response.isdigit() and
tictactoe_functions.is_between(int(response),
min_value, max_value))
def get_valid_response(prompt_message, error_message, min_value, max_value):
""" (str, str, int, int) -> int
Return the user's response to prompt_message, as an int, possibly after
repeated prompting. Display error_message when response is not the str
representation of an int value without a +/- sign that is between min_value
and max_value, inclusive.
(No docstring example given since function depends on user input.)
"""
response = input(prompt_message)
while not is_valid_response(response, min_value, max_value):
print(error_message)
response = input(prompt_message)
return int(response)
def get_game_size():
""" () -> int
Return the valid tic-tac-toe game board size entered by game player.
(No docstring example given since the function depends on user input.)
"""
prompt_message = ('\nEnter desired game board size (an int between 1 and '
+ str(MAX_BOARD_SIZE) + '): ')
error_message = 'Your requested game board size is not valid. Try again!'
return get_valid_response(prompt_message, error_message, 1, MAX_BOARD_SIZE)
def is_occupied(row_index, col_index, game_board):
""" (int, int, str) -> int
Precondition: row_index and col_index are valid indices for a cell in the
tic-tac-toe game_board.
Return True if and only if the cell with indices row_index and col_index
in the tic-tac-toe game_board does not contain the EMPTY cell character.
>>> is_occupied(1, 1, 'XOX-')
True
>>> is_occupied(2, 2, 'XOX-')
False
"""
board_size = tictactoe_functions.get_board_size(game_board)
position = tictactoe_functions.get_position(row_index, col_index,
board_size)
return game_board[position] != tictactoe_functions.EMPTY
def get_row_col(is_human_move, game_board):
""" (bool, str) -> tuple of (int, int)
Return an ordered pair of row and column indices that are valid for the
tic-tac-toe game_board. When is_human_move is True, player is prompted
for indices, otherwise they are randomly generated.
(No docstring example given since the function either depends on user
input or randomly generated numbers.)
"""
board_size = tictactoe_functions.get_board_size(game_board)
if is_human_move:
print('Your move. Enter row and column numbers between 1 and '
+ str(board_size) + '.')
row = get_valid_response('Enter row number: ',
'Your suggested row number was invalid!', 1, board_size)
col = get_valid_response('Enter col number: ',
'Your suggested col number was invalid!', 1, board_size)
else:
row = random.randint(1, board_size)
col = random.randint(1, board_size)
return (row, col)
def get_move(is_human_move, game_board):
""" (bool, str) -> tuple of (int, int)
Return player's move as an ordered pair of row and column indices that
are valid for the tic-tac-toe game_board.
(No docstring example given since the function indirectly depends on either
user input or randomly generated numbers.)
"""
(row, col) = get_row_col(is_human_move, game_board)
while is_occupied(row, col, game_board):
if is_human_move:
print('That spot is already taken! Try again.')
(row, col) = get_row_col(is_human_move, game_board)
return (row, col)
# Interested in why this docstring starts with an r?
# See section 2.4.1: https://docs.python.org/3.4/reference/lexical_analysis.html
def format_game_board(game_board):
r""" (str) -> str
Format the tic-tac-toe game_board in a nice grid format for printing.
>>> format_game_board('XOX-')
"\nThe X's and O's board:\n\n 1 2 \n\n1 X | O\n ---+---\n2 X | -\n"
"""
board_size = tictactoe_functions.get_board_size(game_board)
formatted_board = ''
# Format the title.
formatted_board += '\nThe X\'s and O\'s board:\n\n'
# Add in the column numbers.
formatted_board += ' '
for col in range(1, board_size):
formatted_board += (' ' + str(col) + ' ')
formatted_board += (' ' + str(board_size) + ' \n\n')
# Add in the row numbers, board contents and grid markers.
position = 0
for row in range(1, board_size + 1):
formatted_board += (str(row) + ' ')
for col in range(1, board_size):
formatted_board += (' ' + game_board[position] + ' |')
position = position + 1
formatted_board += (' ' + game_board[position] + '\n')
position = position + 1
if row < board_size:
formatted_board += (' ' + '---+' * (board_size - 1) + '---\n')
return formatted_board
def game_won(game_board, symbol):
""" (str, str) -> bool
Return True if and only if the player using symbol has won the
tic-tac-toe game represented by game_board.
>>> game_won('XXX-O-O--', 'X')
True
>>> game_won('XOXOXOOXO', 'X')
False
"""
board_size = tictactoe_functions.get_board_size(game_board)
winning_string = symbol * board_size
for col in range(1, board_size + 1):
extract = tictactoe_functions.extract_line(game_board, 'down', col)
if extract == winning_string:
return True
for row in range(1, board_size + 1):
extract = tictactoe_functions.extract_line(game_board, 'across', row)
if extract == winning_string:
return True
extract = tictactoe_functions.extract_line(game_board, 'down_diagonal', 1)
if extract == winning_string:
return True
extract = tictactoe_functions.extract_line(game_board, 'up_diagonal', 1)
if extract == winning_string:
return True
return False
def play_tictactoe():
""" () -> None
Play a single game of tic-tac-toe, with one player being the program
user and the other player being this computer program.
(No docstring example given since the function indirectly depends on either
user input or randomly generated numbers.)
"""
# Initialize the game setup.
board_size = get_game_size()
game_board = tictactoe_functions.make_empty_board(board_size)
print('\nYou are using symbol ' + SYMBOLS[HUMAN] + ' and the computer '
+ 'program is using symbol ' + SYMBOLS[COMPUTER] + '.')
print(format_game_board(game_board))
# Play the game until a player wins or there is a draw.
is_human_move = False
have_a_winner = False
while (not have_a_winner and
not tictactoe_functions.game_board_full(game_board)):
is_human_move = not is_human_move
(row,col) = get_move(is_human_move, game_board)
if is_human_move:
player_symbol = SYMBOLS[HUMAN]
print('\nYou chose row ' + str(row) + ' and column '
+ str(col) + '.')
else:
player_symbol = SYMBOLS[COMPUTER]
print('The computer program then chose row ' + str(row)
+ ' and column ' + str(col) + '.')
game_board = tictactoe_functions.make_move(
player_symbol, row, col, game_board)
if not is_human_move:
print(format_game_board(game_board))
have_a_winner = game_won(game_board, player_symbol)
if have_a_winner:
print('We have a winner!')
if is_human_move:
print(format_game_board(game_board))
print('You beat the computer program! Congratulations!')
else:
print('The computer program won!')
print('Re-think your strategy and try again.')
else:
print(format_game_board(game_board))
print('The game has played to a draw.')
print('Re-think your strategy and try again.')
if __name__ == '__main__':
import doctest
doctest.testmod()
play_tictactoe()
```
|
{
"source": "jellycsc/twitter-like-data-query",
"score": 4
}
|
#### File: jellycsc/twitter-like-data-query/twitterverse_functions.py
```python
def process_data(file):
""" (file open for reading) -> Twitterverse dictionary
Return the data of file in the Twitterverse dictionary format.
"""
# Initialize the twitter_dict.
twitter_dict = {}
next_part = file.readline().strip()
while next_part != '':
# Generate the key: 'name', 'location', 'web' and their values
# in the username dictionary.
username = next_part
twitter_dict[username] = {}
twitter_dict[username]['name'] = file.readline().strip()
twitter_dict[username]['location'] = file.readline().strip()
twitter_dict[username]['web'] = file.readline().strip()
# Generate the key: 'bio' and its value in the username dictionary.
line = file.readline()
bio = ''
while line != 'ENDBIO\n':
bio += line
line = file.readline()
twitter_dict[username]['bio'] = bio.strip()
# Generate the key: 'following' and its value in the username dictionary.
twitter_dict[username]['following'] = []
following = file.readline().strip()
while following != 'END':
twitter_dict[username]['following'].append(following)
following = file.readline().strip()
next_part = file.readline().strip()
return twitter_dict
def process_query(file):
""" (file open for reading) -> query dictionary
Return the query of file in the query dictionary format.
"""
# Initialize the query_dict.
query_dict = {}
line = file.readline().strip()
# Generate the key: 'search' and its value in the query_dict.
query_dict['search'] = {}
query_dict['search']['username'] = file.readline().strip()
query_dict['search']['operations'] = []
operations = file.readline().strip()
while operations != 'FILTER':
query_dict['search']['operations'].append(operations)
operations = file.readline().strip()
# Generate the key: 'filter' and its value in the query_dict.
query_dict['filter'] = {}
line = file.readline().strip()
while line != 'PRESENT':
a = line.split()
query_dict['filter'][a[0]] = a[1]
line = file.readline().strip()
# Generate the key: 'present' and its value in the query_dict.
query_dict['present'] = {}
line = file.readline().strip()
while line != '':
b = line.split()
query_dict['present'][b[0]] = b[1]
line = file.readline().strip()
return query_dict
def all_followers(twitter_dict, username):
""" (Twitterverse dictionary, str) -> list of str
Return all the usernames that are following the username as a list based
on the twitter_dict.
>>> twitter_dict1 = {'a':{'name':'', 'location':'', 'web':'', \
'bio':'', 'following':['b', 'c']}, \
'b':{'name':'', 'location':'', 'web':'', \
'bio':'', 'following':['a', 'c']}, \
'c':{'name':'', 'location':'', 'web':'', \
'bio':'', 'following':['b']}}
>>> all_followers(twitter_dict1, 'a')
['b']
>>> twitter_dict2 = {'a':{'name':'', 'location':'', 'web':'', \
'bio':'', 'following':['c']}, \
'b':{'name':'', 'location':'', 'web':'', \
'bio':'', 'following':['a', 'c']}, \
'c':{'name':'', 'location':'', 'web':'', \
'bio':'', 'following':['a']}}
>>> all_followers(twitter_dict2, 'b')
[]
"""
result = []
for i in twitter_dict:
if username in twitter_dict[i]['following']:
result.append(i)
return result
def get_search_results(twitter_dict, search_dict):
""" (Twitterverse dictionary, search specification dictionary) -> list of str
Return a list of strings representing usernames that match the search
criteria in the search_dict based on twitter_dict.
>>> twitter_dict = {'a':{'name':'', 'location':'', 'web':'', \
'bio':'', 'following':['b', 'c']}, \
'b':{'name':'', 'location':'', 'web':'', \
'bio':'', 'following':['c']}, \
'c':{'name':'', 'location':'', 'web':'', \
'bio':'', 'following':['b']}}
>>> search_dict1 = {'operations': ['following'], 'username': 'a'}
>>> get_search_results(twitter_dict, search_dict1)
['b', 'c']
>>> search_dict2 = {'operations': ['followers'], 'username': 'a'}
>>> get_search_results(twitter_dict, search_dict2)
[]
"""
# Check if the operations list is empty.
if search_dict['operations'] == []:
return [search_dict['username']]
else:
# Generate the first result.
if search_dict['operations'][0] == 'following':
result = twitter_dict[search_dict['username']]['following']
elif search_dict['operations'][0] == 'followers':
result = all_followers(twitter_dict, search_dict['username'])
# Use for loop to generate the final result autometically based
# on the first result.
if len(search_dict['operations']) > 1:
for operation in search_dict['operations'][1:]:
sub_result = []
for item in result:
if operation == 'following':
part = twitter_dict[item]['following']
elif operation == 'followers':
part = all_followers(twitter_dict, item)
sub_result += part
                # Delete the repeated items.
result = []
for i in sub_result:
if not(i in result):
result.append(i)
return result
def get_filter_results(twitter_dict, list_of_usernames, filter_dict):
""" (Twitterverse dictionary, list of str, filter specification dictionary) -> list of str
Apply the filter_dict to the given username list based on twitter_dict to
determine which usernames in the list_of_usernames to keep, and return the
resulting list of usernames.
>>> twitter_dict = {'a':{'name':'', 'location':'Modesto, California, USA', \
'web':'', 'bio':'', 'following':[]}, \
'b':{'name':'', 'location':'kansas, uSa', 'web':'', \
'bio':'', 'following':[]}, \
'c':{'name':'', 'location':'Musala, Bulgaria', \
'web':'', 'bio':'', 'following':[]}}
>>> list_of_usernames1 = []
>>> filter_dict1 = {'location-includes': 'apple'}
>>> get_filter_results(twitter_dict, list_of_usernames1, filter_dict1)
[]
>>> list_of_usernames2 = ['a', 'b', 'c']
>>> filter_dict2 = {'location-includes': 'UsA'}
>>> get_filter_results(twitter_dict, list_of_usernames2, filter_dict2)
['a', 'b', 'c']
"""
# The list_of_usernames can't be modified, so I created copies.
result = list_of_usernames.copy()
mod_usernames = result.copy()
for operation in filter_dict:
# To remove every username that doesn't satisfy the condition.
for username in result:
if operation == 'name-includes':
if not(filter_dict[operation].lower() in \
twitter_dict[username]['name'].lower()):
mod_usernames.remove(username)
elif operation == 'location-includes':
if not(filter_dict[operation].lower() in \
twitter_dict[username]['location'].lower()):
mod_usernames.remove(username)
elif operation == 'follower':
if not(username in twitter_dict[filter_dict[operation]]['following']):
mod_usernames.remove(username)
elif operation == 'following':
if not(filter_dict[operation] in twitter_dict[username]['following']):
mod_usernames.remove(username)
result = mod_usernames.copy()
return result
def get_present_string(twitter_dict, list_of_usernames, pre_dict):
""" (Twitterverse dictionary, list of str, presentation specification dictionary) -> str
Format the results of list_of_usernames for presentation based on
pre_dict according to twitter_dict and return the formatted string.
>>> twitter_dict = {'a':{'name':'', 'location':'', 'web':'', \
'bio':'', 'following':['b', 'c']}, \
'b':{'name':'', 'location':'', 'web':'', \
'bio':'', 'following':['c']}, \
'c':{'name':'', 'location':'', 'web':'', \
'bio':'', 'following':['b']}}
>>> list_of_usernames1 = ['a', 'b', 'c']
>>> pre_dict1 = {'sort-by': 'username', 'format': 'short'}
>>> get_present_string(twitter_dict, list_of_usernames1, pre_dict1)
"['a', 'b', 'c']"
>>> list_of_usernames2 = []
>>> pre_dict2 = {'sort-by': 'popularity', 'format': 'long'}
>>> get_present_string(twitter_dict, list_of_usernames2, pre_dict2)
'----------\\n----------\\n'
"""
# The list_of_usernames can't be modified, so I created copies.
mod_list_of_usernames = list_of_usernames.copy()
    # Create a function dictionary, so we can call it easily.
word_to_funcs = {'popularity': more_popular, 'username': \
username_first, 'name': name_first}
# Before we present it, let's sort it by using the Sorting Helper Functions.
tweet_sort(twitter_dict, mod_list_of_usernames, \
word_to_funcs[pre_dict['sort-by']])
    # Present the results in the requested format.
if pre_dict['format'] == 'long':
flag = 0
result = '----------\n'
# If the for loop is not executed, then print another line of dashes.
for i in mod_list_of_usernames:
flag += 1
result += str(i) + '\n'
result += 'name: ' + str(twitter_dict[i]['name']) + '\n'
result += 'location: ' + str(twitter_dict[i]['location']) + '\n'
result += 'website: ' + str(twitter_dict[i]['web']) + '\n'
result += 'bio:\n'
result += str(twitter_dict[i]['bio']) + '\n'
result += 'following: ' + str(twitter_dict[i]['following']) + '\n'
result += '----------\n'
if flag == 0:
result += '----------\n'
return result
elif pre_dict['format'] == 'short':
return str(mod_list_of_usernames)
# --- Sorting Helper Functions ---
def tweet_sort(twitter_data, results, cmp):
""" (Twitterverse dictionary, list of str, function) -> NoneType
Sort the results list using the comparison function cmp and the data in
twitter_data.
>>> twitter_data = {\
'a':{'name':'Zed', 'location':'', 'web':'', 'bio':'', 'following':[]}, \
'b':{'name':'Lee', 'location':'', 'web':'', 'bio':'', 'following':[]}, \
'c':{'name':'anna', 'location':'', 'web':'', 'bio':'', 'following':[]}}
>>> result_list = ['c', 'a', 'b']
>>> tweet_sort(twitter_data, result_list, username_first)
>>> result_list
['a', 'b', 'c']
>>> tweet_sort(twitter_data, result_list, name_first)
>>> result_list
['b', 'a', 'c']
"""
# Insertion sort
for i in range(1, len(results)):
current = results[i]
position = i
while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:
results[position] = results[position - 1]
position = position - 1
results[position] = current
def more_popular(twitter_data, a, b):
""" (Twitterverse dictionary, str, str) -> int
Return -1 if user a has more followers than user b, 1 if fewer followers,
and the result of sorting by username if they have the same, based on the
data in twitter_data.
>>> twitter_data = {\
'a':{'name':'', 'location':'', 'web':'', 'bio':'', 'following':['b']}, \
'b':{'name':'', 'location':'', 'web':'', 'bio':'', 'following':[]}, \
'c':{'name':'', 'location':'', 'web':'', 'bio':'', 'following':[]}}
>>> more_popular(twitter_data, 'a', 'b')
1
>>> more_popular(twitter_data, 'a', 'c')
-1
"""
a_popularity = len(all_followers(twitter_data, a))
b_popularity = len(all_followers(twitter_data, b))
if a_popularity > b_popularity:
return -1
if a_popularity < b_popularity:
return 1
return username_first(twitter_data, a, b)
def username_first(twitter_data, a, b):
""" (Twitterverse dictionary, str, str) -> int
Return 1 if user a has a username that comes after user b's username
alphabetically, -1 if user a's username comes before user b's username,
and 0 if a tie, based on the data in twitter_data.
>>> twitter_data = {\
'a':{'name':'', 'location':'', 'web':'', 'bio':'', 'following':['b']}, \
'b':{'name':'', 'location':'', 'web':'', 'bio':'', 'following':[]}, \
'c':{'name':'', 'location':'', 'web':'', 'bio':'', 'following':[]}}
>>> username_first(twitter_data, 'c', 'b')
1
>>> username_first(twitter_data, 'a', 'b')
-1
"""
if a < b:
return -1
if a > b:
return 1
return 0
def name_first(twitter_data, a, b):
""" (Twitterverse dictionary, str, str) -> int
Return 1 if user a's name comes after user b's name alphabetically,
-1 if user a's name comes before user b's name, and the ordering of their
usernames if there is a tie, based on the data in twitter_data.
>>> twitter_data = {\
'a':{'name':'Zed', 'location':'', 'web':'', 'bio':'', 'following':[]}, \
'b':{'name':'Lee', 'location':'', 'web':'', 'bio':'', 'following':[]}, \
'c':{'name':'anna', 'location':'', 'web':'', 'bio':'', 'following':[]}}
>>> name_first(twitter_data, 'c', 'b')
1
>>> name_first(twitter_data, 'b', 'a')
-1
"""
a_name = twitter_data[a]["name"]
b_name = twitter_data[b]["name"]
if a_name < b_name:
return -1
if a_name > b_name:
return 1
return username_first(twitter_data, a, b)
if __name__ == '__main__':
import doctest
doctest.testmod()
```
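A minimal end-to-end sketch of how the three stages above are typically chained; the small Twitterverse dictionary and query dictionaries are hypothetical, and the functions are assumed to be in scope from the file above.

```python
# Hypothetical data: 'a' follows 'b'; 'b' is located in the USA.
twitter_dict = {'a': {'name': 'Ann', 'location': 'Toronto, Canada', 'web': '',
                      'bio': '', 'following': ['b']},
                'b': {'name': 'Bob', 'location': 'Boston, USA', 'web': '',
                      'bio': '', 'following': []}}
search_dict = {'username': 'a', 'operations': ['following']}
filter_dict = {'location-includes': 'usa'}
present_dict = {'sort-by': 'username', 'format': 'short'}

found = get_search_results(twitter_dict, search_dict)         # ['b']
kept = get_filter_results(twitter_dict, found, filter_dict)   # ['b']
print(get_present_string(twitter_dict, kept, present_dict))   # "['b']"
```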
|
{
"source": "JellyDream/tensorflow_vgg16_age",
"score": 2
}
|
#### File: JellyDream/tensorflow_vgg16_age/vgg_model.py
```python
import tensorflow as tf
import numpy as np
import matplotlib.image as mpimg
import os
#os.environ['CUDA_VISIBLE_DEVICES']='0'
os.environ['TF_CPP_MIN_LOG_LEVEL']='0'
class Model(object):
@staticmethod
def inference(x, drop_rate):
        '''Hidden layer 1: 224x224x64'''
with tf.variable_scope('hidden1'):
            # Convolution layer
conv = tf.layers.conv2d(x, filters=64, kernel_size=[3, 3], padding='same')
            # Batch normalization
norm = tf.layers.batch_normalization(conv)
            # Activation function (ReLU)
activation = tf.nn.relu(norm)
            # Dropout layer
dropout = tf.layers.dropout(activation, rate=drop_rate)
hidden1 = dropout
        '''Hidden layer 2: 224x224x64'''
with tf.variable_scope('hidden2'):
conv = tf.layers.conv2d(hidden1, filters=64, kernel_size=[3, 3], padding='same')
norm = tf.layers.batch_normalization(conv)
activation = tf.nn.relu(norm)
pool = tf.layers.max_pooling2d(activation, pool_size=[2, 2], strides=2, padding='same')
dropout = tf.layers.dropout(pool, rate=drop_rate)
hidden2 = dropout
        '''Hidden layer 3'''
with tf.variable_scope('hidden3'):
conv = tf.layers.conv2d(hidden2, filters=128, kernel_size=[3, 3], padding='same')
norm = tf.layers.batch_normalization(conv)
activation = tf.nn.relu(norm)
dropout = tf.layers.dropout(activation, rate=drop_rate)
hidden3 = dropout
        '''Hidden layer 4'''
with tf.variable_scope('hidden4'):
conv = tf.layers.conv2d(hidden3, filters=128, kernel_size=[3, 3], padding='same')
norm = tf.layers.batch_normalization(conv)
activation = tf.nn.relu(norm)
pool = tf.layers.max_pooling2d(activation, pool_size=[2, 2], strides=2, padding='same')
dropout = tf.layers.dropout(pool, rate=drop_rate)
hidden4 = dropout
        '''Hidden layer 5'''
with tf.variable_scope('hidden5'):
conv = tf.layers.conv2d(hidden4, filters=256, kernel_size=[3, 3], padding='same')
norm = tf.layers.batch_normalization(conv)
activation = tf.nn.relu(norm)
dropout = tf.layers.dropout(activation, rate=drop_rate)
hidden5 = dropout
        '''Hidden layer 6'''
with tf.variable_scope('hidden6'):
conv = tf.layers.conv2d(hidden5, filters=256, kernel_size=[3, 3], padding='same')
norm = tf.layers.batch_normalization(conv)
activation = tf.nn.relu(norm)
dropout = tf.layers.dropout(activation, rate=drop_rate)
hidden6 = dropout
        '''Hidden layer 7'''
with tf.variable_scope('hidden7'):
conv = tf.layers.conv2d(hidden6, filters=256, kernel_size=[3, 3], padding='same')
norm = tf.layers.batch_normalization(conv)
activation = tf.nn.relu(norm)
pool = tf.layers.max_pooling2d(activation, pool_size=[2, 2], strides=2, padding='same')
dropout = tf.layers.dropout(pool, rate=drop_rate)
hidden7 = dropout
        '''Hidden layer 8'''
with tf.variable_scope('hidden8'):
conv = tf.layers.conv2d(hidden7, filters=512, kernel_size=[3, 3], padding='same')
norm = tf.layers.batch_normalization(conv)
activation = tf.nn.relu(norm)
dropout = tf.layers.dropout(activation, rate=drop_rate)
hidden8 = dropout
        '''Hidden layer 9'''
with tf.variable_scope('hidden9'):
conv = tf.layers.conv2d(hidden8, filters=512, kernel_size=[3, 3], padding='same')
norm = tf.layers.batch_normalization(conv)
activation = tf.nn.relu(norm)
dropout = tf.layers.dropout(activation, rate=drop_rate)
hidden9 = dropout
        '''Hidden layer 10'''
with tf.variable_scope('hidden10'):
conv = tf.layers.conv2d(hidden9, filters=512, kernel_size=[3, 3], padding='same')
norm = tf.layers.batch_normalization(conv)
activation = tf.nn.relu(norm)
pool = tf.layers.max_pooling2d(activation, pool_size=[2, 2], strides=2, padding='same')
dropout = tf.layers.dropout(pool, rate=drop_rate)
hidden10 = dropout
        '''Hidden layer 11'''
with tf.variable_scope('hidden11'):
conv = tf.layers.conv2d(hidden10, filters=512, kernel_size=[3, 3], padding='same')
norm = tf.layers.batch_normalization(conv)
activation = tf.nn.relu(norm)
dropout = tf.layers.dropout(activation, rate=drop_rate)
hidden11 = dropout
        '''Hidden layer 12'''
with tf.variable_scope('hidden12'):
conv = tf.layers.conv2d(hidden11, filters=512, kernel_size=[3, 3], padding='same')
norm = tf.layers.batch_normalization(conv)
activation = tf.nn.relu(norm)
dropout = tf.layers.dropout(activation, rate=drop_rate)
hidden12 = dropout
        '''Hidden layer 13'''
with tf.variable_scope('hidden13'):
conv = tf.layers.conv2d(hidden12, filters=512, kernel_size=[3, 3], padding='same')
norm = tf.layers.batch_normalization(conv)
activation = tf.nn.relu(norm)
pool = tf.layers.max_pooling2d(activation, pool_size=[2, 2], strides=2, padding='same')
dropout = tf.layers.dropout(pool, rate=drop_rate)
hidden13 = dropout
flatten = tf.reshape(hidden13, [-1, 7 * 7 * 512])
        '''Hidden layer 14'''
        # Fully connected layer
with tf.variable_scope('hidden14'):
dense = tf.layers.dense(flatten, units=4096, activation=tf.nn.relu)
hidden14 = dense
        '''Hidden layer 15'''
with tf.variable_scope('hidden15'):
dense = tf.layers.dense(hidden14, units=4096, activation=tf.nn.relu)
hidden15 = dense
        '''Hidden layer 16'''
with tf.variable_scope('hidden16'):
dense = tf.layers.dense(hidden15, units=101)
hidden16 = dense
age_logits = hidden16
return age_logits
@staticmethod
def loss(age_labels, age_logits):
        # Compute the sparse softmax cross-entropy for each example and reduce to a mean
loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=age_labels, logits=age_logits))
return loss
###-------
if __name__=='__main__':
im = mpimg.imread('images/cat.jpg')/255.
im_4d = im[np.newaxis]
x = tf.convert_to_tensor(im_4d, dtype=tf.float32)
label_logits = Model.inference(x, drop_rate=0.)
label_predictions = tf.argmax(label_logits, axis=1)
```
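A hedged sketch of how the Model class above could be wired into a TF1-style training step; the placeholder shapes, label range, optimizer and learning rate are assumptions for illustration, not part of the original file.

```python
# Hypothetical training-graph sketch (TensorFlow 1.x); assumes Model from vgg_model.py is in scope.
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])  # assumed VGG-16 input size
ages = tf.placeholder(tf.int64, [None])                    # assumed integer age labels in [0, 100]

age_logits = Model.inference(images, drop_rate=0.5)
loss = Model.loss(ages, age_logits)
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)     # optimizer and learning rate are assumptions
predictions = tf.argmax(age_logits, axis=1)
```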
|
{
"source": "jellyedwards/dwitter",
"score": 2
}
|
#### File: dwitter/templatetags/insert_code_blocks.py
```python
import re
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
def to_code_block(m):
code = m.group('code')
code = re.sub(r'\\`', '`', code)
return '<code>%s</code>' % code
@register.filter
def insert_code_blocks(text):
result = re.sub(
r'`' # start with `
r'(?P<code>.*?)' # capture code block
r'(?<!\\)' # not preceded by \
r'`', # end with `
to_code_block,
text
)
return mark_safe(result)
```
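A quick sketch of the filter's behaviour on a hypothetical string; it can be called directly because `register.filter` returns the decorated function unchanged.

```python
# Backtick-delimited spans become <code> tags; escaped backticks (\`) inside a span are unescaped.
print(insert_code_blocks(r'try `alert(1)` or `a\`b`'))
# -> try <code>alert(1)</code> or <code>a`b</code>
```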
#### File: tests/feed/test_post_views.py
```python
from django.test import TransactionTestCase, Client
from django.contrib.auth.models import User
from django.contrib import auth
from dwitter.models import Dweet, Comment, Hashtag
from django.utils import timezone
class PostDweetTestCase(TransactionTestCase):
def setUp(self):
self.client = Client()
self.user = User.objects.create(username="user", password="<PASSWORD>")
self.user.set_password("<PASSWORD>") # Created with proper hash so you can log in!
self.user.save()
self.dweet = Dweet.objects.create(id=1000,
code="dweet code",
posted=timezone.now(),
author=self.user)
def login(self):
# Log in
self.client.post('/accounts/login/',
{'username': 'user', 'password': '<PASSWORD>'},
follow=True)
user = auth.get_user(self.client)
self.assertTrue(user.is_authenticated, "Should be logged in after logging in")
return user
def test_post_new_dweet(self):
user = self.login()
response = self.client.post('/dweet', {'code': 'test_code'}, follow=True)
self.assertEqual(response.status_code, 200,
"Posting dweet return 200. Status code " + str(response.status_code))
dweet = Dweet.objects.get(code='test_code')
self.assertEqual(dweet.code, 'test_code')
self.assertEqual(dweet.author, user)
def test_post_new_dweet_with_first_comment(self):
user = self.login()
response = self.client.post('/dweet',
{'code': 'test_code', 'first-comment': 'hello there'},
follow=True)
self.assertEqual(response.status_code, 200,
"Posting dweet return 200. Status code " + str(response.status_code))
dweet = Dweet.objects.get(code='test_code')
self.assertEqual(dweet.code, 'test_code')
self.assertEqual(dweet.author, user)
comment = Comment.objects.get(reply_to=dweet)
self.assertEqual(comment.text, 'hello there')
self.assertEqual(comment.author, user)
def test_post_new_dweet_with_first_comment_with_hashtag(self):
user = self.login()
response = self.client.post('/dweet',
{'code': 'test_code', 'first-comment': 'hello there #woo'},
follow=True)
self.assertEqual(response.status_code, 200,
"Posting dweet return 200. Status code " + str(response.status_code))
dweet = Dweet.objects.get(code='test_code')
self.assertEqual(dweet.code, 'test_code')
self.assertEqual(dweet.author, user)
comment = Comment.objects.get(reply_to=dweet)
self.assertEqual(comment.text, 'hello there #woo')
self.assertEqual(comment.author, user)
hashtag = Hashtag.objects.get(name='woo')
self.assertEqual(dweet in hashtag.dweets.all(), True)
def test_too_long_dweet_post(self):
user = self.login()
response = self.client.post('/dweet', {'code': 'test code that is way too long,' +
'wow this looks long in code.' +
'We could fit so much in here.' +
'oh wow. mooooooooooooooooar text.' +
'Getting there.' +
'And BAM tooo long!'}, follow=True)
self.assertContains(response, "Dweet code too long!", status_code=400)
# shorter code should go through!
response = self.client.post('/dweet', {'code': 'test code that is a lot shorter,' +
'wow this looks long in code.' +
'And BAM not tooo long!'}, follow=True)
self.assertEqual(response.status_code, 200)
dweets = Dweet.objects.filter(author=user)
self.assertEqual(dweets.count(), 2)
def test_post_dweet_reply(self):
user = self.login()
response = self.client.post('/d/1000/reply', {'code': 'test_code'}, follow=True)
self.assertEqual(response.status_code, 200,
"Posting dweet return 200. Status code " + str(response.status_code))
dweet = Dweet.objects.get(code='test_code')
self.assertEqual(dweet.code, 'test_code')
self.assertEqual(dweet.author, user)
self.assertEqual(dweet.reply_to, self.dweet)
def test_post_dweet_reply_with_first_comment(self):
user = self.login()
response = self.client.post('/d/1000/reply',
{'code': 'test_code', 'first-comment': 'hello there'},
follow=True)
self.assertEqual(response.status_code, 200,
"Posting dweet return 200. Status code " + str(response.status_code))
dweet = Dweet.objects.get(code='test_code')
self.assertEqual(dweet.code, 'test_code')
self.assertEqual(dweet.author, user)
self.assertEqual(dweet.reply_to, self.dweet)
comment = Comment.objects.get(reply_to=dweet)
self.assertEqual(comment.text, 'hello there')
self.assertEqual(comment.author, user)
def test_post_dweet_reply_with_first_comment_with_hashtag(self):
user = self.login()
response = self.client.post('/d/1000/reply',
{'code': 'test_code', 'first-comment': 'hello there #woo'},
follow=True)
self.assertEqual(response.status_code, 200,
"Posting dweet return 200. Status code " + str(response.status_code))
dweet = Dweet.objects.get(code='test_code')
self.assertEqual(dweet.code, 'test_code')
self.assertEqual(dweet.author, user)
self.assertEqual(dweet.reply_to, self.dweet)
comment = Comment.objects.get(reply_to=dweet)
self.assertEqual(comment.text, 'hello there #woo')
self.assertEqual(comment.author, user)
hashtag = Hashtag.objects.get(name='woo')
self.assertEqual(dweet in hashtag.dweets.all(), True)
def test_too_long_dweet_reply(self):
user = self.login()
response = self.client.post('/d/1000/reply', {'code': 'test code that is way too long,' +
'wow this looks long in code.' +
'We could fit so much in here.' +
'oh wow. mooooooooooooooooar text.' +
'Getting there.' +
'And BAM tooo long!'}, follow=True)
self.assertContains(response, "Dweet code too long!", status_code=400)
# shorter code should go through!
response = self.client.post('/d/1000/reply', {'code': 'test code that is a lot shorter,' +
'wow this looks long in code.' +
'And BAM not tooo long!'}, follow=True)
self.assertEqual(response.status_code, 200)
dweets = Dweet.objects.filter(author=user)
self.assertEqual(dweets.count(), 2)
def test_like_dweet(self):
pass # TODO
def test_unlike_dweet(self):
pass # TODO
def test_delete_dweet(self):
pass # TODO
def test_GET_requests_fail(self):
pass # TODO
```
|
{
"source": "jellyfish26/OmoroINE",
"score": 4
}
|
#### File: OmoroINE/server/mainDB.py
```python
import sqlite3, json, random
# Look up station information
def getStation(Search):
result = access(
"SELECT * from StationCode where LocalCode = '{}' and LineCode = '{}' and CodeStation = '{}';".format(Search[0],Search[1],Search[2]),'db/stationCode.db')
return result
# 駅データを返却
def returnData(data):
try:
result = getStation(data)
out = []
for i in range(3, 7):
out.append(result[0][i])
dict_g = {'Company': out[0], 'LineName': out[1], 'StationName': out[2], 'Memo': out[3]}
except:
dict_g = {}
return json.dumps(dict_g)
return json.dumps(dict_g, ensure_ascii=False, indent=4)
# Execute an SQLite query
def access(query, db_name):
print(query)
connection = sqlite3.connect(db_name)
cursor = connection.cursor()
try:
result = cursor.execute(query).fetchall()
except sqlite3.IntegrityError:
cursor.close()
connection.close()
raise sqlite3.IntegrityError
connection.commit()
cursor.close()
connection.close()
# print(result)
return result
# Add a record
def add(data):
try:
access(
'''INSERT into main(line, station, type, value, rating, rating_count) values("{}", "{}", "{}", "{}", 3, 1);'''.format(data[0],data[1],data[2],data[3]),'db/main.db')
return True
except:
return False
def random_select(line, station):
try:
arts = access('''SELECT * FROM main WHERE line=="{}" AND station=="{}"'''.format(line, station), 'db/main.db')
return random.choice(arts)
except:
return None
def update(u_id, rating):
try:
r = access('''SELECT rating, rating_count FROM main WHERE u_id=={}'''.format(u_id), 'db/main.db')[0]
rating = (r[0]*r[1]+rating)/(r[1]+1)
access('''UPDATE main SET rating = {}, rating_count = {} WHERE u_id=={}'''.format(rating, r[1]+1, u_id), 'db/main.db')
return True
except:
return False
# Initialization
def init_main_db():
access('''CREATE TABLE main(
u_id INTEGER PRIMARY KEY,
line TEXT NOT NULL ,
station TEXT NOT NULL,
type TEXT NOT NULL,
value TEXT NOT NULL,
rating REAL NOT NULL,
rating_count INTEGER NOT NULL);'''
, 'db/main.db')
if __name__ == '__main__':
init_main_db()
# u_id INTEGER PRIMARY KEY,
```
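A small usage sketch of the helpers above; the station codes and post contents are made up, and the SQLite files under db/ are assumed to exist.

```python
# Hypothetical usage of the mainDB helpers.
add(['JR-East', 'Tokyo', 'restaurant', 'Great ramen near exit A3'])  # returns True/False
pick = random_select('JR-East', 'Tokyo')   # random row for that line/station, or None
if pick is not None:
    update(pick[0], 4)                     # pick[0] is u_id; folds a new rating into the running mean
print(returnData(['11', '001', '01']))     # JSON for one station code triple, '{}' if not found
```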
#### File: OmoroINE/server/tempDB.py
```python
import sqlite3
import secrets
def add(id, type, timestamp, value):
try:
access('INSERT INTO temp(line_id, type, value, timestamp) VALUES {}'.format((id, type, value, timestamp)))
except sqlite3.IntegrityError:
access("DELETE FROM temp WHERE line_id == '{}'".format(id))
access('INSERT INTO temp(line_id, type, value, timestamp) VALUES {}'.format((id, type, value, timestamp)))
def add_location(id, station):
try:
access('INSERT INTO temp_location(line_id,station) VALUES {}'.format((id, station)))
except sqlite3.IntegrityError:
access("DELETE FROM temp_location WHERE line_id == '{}'".format(id))
access('INSERT INTO temp_location(line_id,station) VALUES {}'.format((id, station)))
def get(id):
return access("SELECT * FROM temp WHERE line_id == '{}'".format(id))
def get_location(id):
return access("SELECT * FROM temp_location WHERE line_id == '{}'".format(id))
def delete(id):
access("DELETE FROM temp WHERE line_id == '{}'".format(id))
def delete_location(id):
access("DELETE FROM temp_location WHERE line_id == '{}'".format(id))
def issue_token(id):
token = secrets.token_hex()
access('''INSERT INTO rating_token(line_id, token) VALUES {}'''.format((id, str(token))))
return token
def isvalid_token(id, token):
result = access('''SELECT * FROM rating_token WHERE line_id == "{}" AND token == "{}"'''.format(id, token))
if result:
return True
return False
def delete_token(id, token):
access("DELETE FROM rating_token WHERE line_id == '{}' AND token == '{}'".format(id, token))
def access(query):
connection = sqlite3.connect('db/temp.db')
cursor = connection.cursor()
try:
result = cursor.execute(query).fetchall()
except sqlite3.IntegrityError:
cursor.close()
connection.close()
raise sqlite3.IntegrityError
connection.commit()
cursor.close()
connection.close()
return result
def init_temp_db():
# access('''CREATE TABLE temp(line_id TEXT NOT NULL,type TEXT NOT NULL ,value TEXT NOT NULL,timestamp INTEGER NOT NULL, PRIMARY KEY(line_id));''')
# access('''CREATE TABLE temp_location(line_id TEXT NOT NULL PRIMARY KEY ,station TEXT NOT NULL);''')
access('''CREATE TABLE rating_token(line_id TEXT NOT NULL PRIMARY KEY ,token TEXT NOT NULL);''')
if __name__ == '__main__':
init_temp_db()
```
|
{
"source": "JellyFive/3D-pose-estimation--translation",
"score": 2
}
|
#### File: JellyFive/3D-pose-estimation--translation/evaluate.py
```python
import numpy as np
import math
from pyquaternion import Quaternion
from Math import get_corners
def iou(gt_box, est_box):
xA = max(gt_box[0], est_box[0])
yA = max(gt_box[1], est_box[1])
xB = min(gt_box[2], est_box[2])
yB = min(gt_box[3], est_box[3])
if xB <= xA or yB <= yA:
return 0.
interArea = (xB - xA) * (yB - yA)
boxAArea = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
boxBArea = (est_box[2] - est_box[0]) * (est_box[3] - est_box[1])
return interArea / float(boxAArea + boxBArea - interArea)
def trans_error(gt_trans, est_trans):
"""
    :param gt_trans: ground-truth 3D translation, np.array([x, y, z])
    :param est_trans: estimated 3D translation
    :return trans_err_norm: L2 (sum-of-squares) translation error
    :return trans_err_single: per-axis absolute errors in x, y, z
"""
    # L2 norm (sum of squares)
trans_err_norm = np.linalg.norm(gt_trans - est_trans)
    # Per-axis absolute error
trans_err_single = np.abs(gt_trans - est_trans)
return trans_err_norm, trans_err_single
def rot_error(gt_rot, est_rot):
"""
    :param gt_rot: ground-truth rotation angles, np.array([pitch, yaw, roll])
    :param est_rot: estimated rotation angles
    :return rot_error: 2 * arccos(|<gt_pose, est_pose>|) * 180 / 3.14, the arccosine (geodesic) distance
"""
    # Convert Euler angles (pitch, yaw, roll) to a quaternion
def eulerAnglesToQu(theta):
q = np.array([math.cos(theta[0]/2)*math.cos(theta[1]/2)*math.cos(theta[2]/2)+math.sin(theta[0]/2)*math.sin(theta[1]/2)*math.sin(theta[2]/2),
math.sin(theta[0]/2)*math.cos(theta[1]/2)*math.cos(theta[2]/2) -
math.cos(theta[0]/2)*math.sin(theta[1]/2) *
math.sin(theta[2]/2),
math.cos(theta[0]/2)*math.sin(theta[1]/2)*math.cos(theta[2]/2) +
math.sin(theta[0]/2)*math.cos(theta[1]/2) *
math.sin(theta[2]/2),
math.cos(theta[0]/2)*math.cos(theta[1]/2)*math.sin(theta[2]/2) -
math.sin(theta[0]/2)*math.sin(theta[1]/2) *
math.cos(theta[2]/2)
])
return q
# gt_quat = eulerAnglesToQu(gt_rot)
# est_quat = eulerAnglesToQu(est_rot)
# ans = np.dot(gt_quat, est_quat.T)
# return np.rad2deg(2*math.acos(np.abs(ans)))
    # Equivalent to the commented-out computation above
gt_quat = Quaternion(eulerAnglesToQu(gt_rot))
est_quat = Quaternion(eulerAnglesToQu(est_rot))
return np.abs((gt_quat * est_quat.inverse).degrees)
def add_err(dim, gt_trans, est_trans, gt_rot, est_rot):
"""
    :param dim: object dimensions
    :param gt_trans, gt_rot: ground-truth pose
    :param est_trans, est_rot: estimated pose
    :return add_error: mean Euclidean distance between the 8 corresponding box corners
"""
gt_corners_3D = get_corners(dim, gt_trans, gt_rot[0], gt_rot[1], gt_rot[2])
est_corners_3D = get_corners(
dim, est_trans, est_rot[0], est_rot[1], est_rot[2])
add_error = np.mean(np.linalg.norm(gt_corners_3D - est_corners_3D, axis=1))
return add_error
if __name__ == "__main__":
trans_errors_norm = []
trans_errors_single = []
rot_errors = []
adds = []
gt_bbox = np.array([120, 200, 400, 700])
est_bbox = np.array([120, 200, 400, 650])
dim = np.array([2, 2, 2])
gt_trans = np.array([1, 2, 3])
est_trans = np.array([1, 2.2, 3.5])
gt_rot = np.array([0.5237, -0.5237, 0])
est_rot = np.array([0.5237, -0.5537, 0])
diameter = np.sqrt(np.square(dim[0])+np.square(dim[1])+np.square(dim[2]))
if iou(gt_bbox, est_bbox) >= 0.5:
        t_err = trans_error(gt_trans, est_trans)
        trans_errors_norm.append(t_err[0])
        trans_errors_single.append(t_err[1])
rot_errors.append(rot_error(gt_rot, est_rot))
adds.append(add_err(dim, gt_trans, est_trans,
gt_rot, est_rot) < (0.1 * diameter))
mean_trans_error_norm = np.mean(trans_errors_norm)
mean_trans_error_single = np.mean(trans_errors_single, axis=0)
mean_rot_error = np.mean(rot_errors)
mean_add = np.mean(adds)
print("\tMean Trans Error Norm: {:.3f}".format(mean_trans_error_norm))
print("\tMean Rotation Error: {:.3f}".format(mean_rot_error))
print("\tMean Trans Errors: X: {:.3f}, Y: {:.3f}, Z: {:.3f}".format(mean_trans_error_single[0],
mean_trans_error_single[1],
mean_trans_error_single[2]))
print("\tMean ADD: {:.3f}".format(mean_add))
```
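As a hedged sanity check of rot_error above: two poses that differ only by a yaw of 0.1 rad should report a geodesic distance of about 0.1 * 180 / pi, roughly 5.73 degrees (assumes the functions from evaluate.py are in scope).

```python
import numpy as np

gt = np.array([0.0, 0.5237, 0.0])    # (pitch, yaw, roll) in radians
est = np.array([0.0, 0.4237, 0.0])   # yaw differs by 0.1 rad
print(rot_error(gt, est))            # ~5.73 degrees
```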
|
{
"source": "JellyJoe198/python-macros",
"score": 4
}
|
#### File: JellyJoe198/python-macros/Minecraft - empty buckets on 0.py
```python
import time
import threading
from pynput.mouse import Button
from pynput.mouse import Controller as mouseController
from pynput.keyboard import Listener, KeyCode, Key
##from pynput.keyboard import Controller as keyboardController
from defaultCommands import tripleClick, pressKey
button = Button.right
button_l = Button.left
exit_key = Key.esc # stop when Escape pressed
start_stop_key = KeyCode(96) # 96 = Numpad0 (Zero)
repeats = 9 # how many times will it repeat? (how many buckets to empty?)
inventory_key = 'e' # key to open inventory
hotbar_key = '2' # hotbar slot to use
delay = .05
start_delay = .2
middle_delay = .2
def wait(seco = delay):
time.sleep(seco)
def clicky(but=button):
## print('click')
mouse.click(but)
wait(.01)
def on_press(key):
if key == exit_key:
listener.stop()
global running
running = False
print("stopped by user\n")
elif key == start_stop_key:
print('macro started')
wait(start_delay)
for i in range(repeats):
mouse.scroll(0,-2)
wait()
clicky()
wait()
# As of now it has emptied buckets and will do inventory part next.
## print('done part 1')
wait(start_delay)
pressKey(inventory_key)
pressKey(hotbar_key) # take a bucket from hotbar
tripleClick(.05) # take all buckets
wait()
pressKey(hotbar_key) # put the buckets back in hotbar
wait()
pressKey(inventory_key) # exit inventory
print('macro finished\n')
mouse = mouseController()
# put this thread inside a try block to keep the parent thread alive
running = True
try:
#Collect events in a non-blocking fashion:
listener = Listener(
on_press=on_press)
listener.start()
print("program started")
while running:
if False:
print("your computer is broken")
finally:
print('parent thread ended\n')
```
#### File: JellyJoe198/python-macros/volume down for ads.py
```python
"""This program decreases Windows volume by a certain amount after a certain key is pressed,
then brings it back up after a certain time or after a certain button is pressed.
You can also press another button to bring up the console dialogue to change the amount.
Also it exits if a different certain key is pressed.
These keys can be seen in the variables: `exit_key` `down_key` `up_key` `change_key`
"""
import time
##import pyautogui
##print(pyautogui.KEYBOARD_KEYS)
from pyautogui import press
from pynput.keyboard import Listener, Key, KeyCode
##import logging
import threading
event = threading.Event
##times = 4
# start by asking user how far to go each time
def change_times():
# ask for user input and strip noise characters to get number
global times
try:
gotten = input("how many times to change? ")
times = int(gotten.strip('*/-'))
except:
print("there was a problem with your input. times set to 4")
times = 4
## print('times = {}'.format(times))
change_times()
##print('global times = {}'.format(times))
lowered = False
def downer(delay=1):
global lowered
if lowered:
return
# go down
print("going down")
lowered = True
for i in range(times):
press('volumedown')
# wait every 6th time to stop windows from skipping volume numbers
if not (i+1)%6:
## print("modulus active on i={}".format(i))
time.sleep(.2)
# wait by making a new thread to free up this thread for new inputs
x = threading.Thread(target=thread_function, args=(1,delay))
x.start()
# x.join()
def thread_function(name, delay):
## logging.info("Thread %s: starting", name)
time.sleep(delay)
## logging.info("Thread %s: finishing", name)
# go back up
uppinator()
def uppinator():
global lowered
if lowered:
lowered = False
print("going up")
for i in range(times):
press('volumeup')
exit_key = KeyCode(192) # '~' is 192, Esc is 27 or Key.esc
down_key = KeyCode(111)#'/'
up_key = KeyCode(106)#'*'
change_key = KeyCode(109)#'-'
def on_press(key):
if key == exit_key:
listener.stop()
global running
running = False
print("stopped by user\n",end='')
elif key == down_key:
downer(90)
elif key == up_key:
uppinator()
elif key == change_key:
change_times()
# i want to be able to update the times var based on user changing volume,
# but that could lead to unintended consequences.
# put this thread inside a try block to keep the parent thread alive
running = True
try:
#Collect events in a non-blocking fashion:
listener = Listener(
on_press=on_press)
listener.start()
print("program started")
    # wait until the child thread dies
while True:
if not running:
break
finally:
listener.stop() # retroactively kill child thread
print('parent thread ended\n',end='')
```
|
{
"source": "Jellymaxd/DejaPlay",
"score": 3
}
|
#### File: master/play2vec/building.py
```python
import pickle
import numpy as np
def Jaccard(A,B):
jtemp = len(A&B)
return round(jtemp/(len(A)+len(B)-jtemp),2)
if __name__ == '__main__':
path1=r'TrainedData/'
J_threshold = 0.3
ogm_train_data = pickle.load(open(path1+'ogm_train_data', 'rb'), encoding='bytes')
#cor_ogm_train_data = pickle.load(open(path1+'drop_ogm_train_data', 'rb'), encoding='bytes')
cor_ogm_train_data = pickle.load(open(path1+'noise_ogm_train_data', 'rb'), encoding='bytes')
senbag = ogm_train_data + cor_ogm_train_data
corpus = {}
id = 0
counter = 1
for wordbag in senbag:
if counter % 500 == 0:
print('processing:',counter,'/',len(senbag))
for words in wordbag:
temp = -1
temp_value = -1
if id == 0:
corpus[frozenset(words)] = id
id = id + 1
continue
for key, value in corpus.items():
J = Jaccard(frozenset(words),key)
if temp < J:
temp = J
temp_value = value
if temp > J_threshold:
corpus[frozenset(words)] = temp_value
else:
corpus[frozenset(words)] = id
id = id + 1
counter = counter + 1
pickle.dump(corpus, open(path1+'corpus', 'wb'), protocol=2)
```
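A tiny worked example of the Jaccard-threshold rule used above (hypothetical word sets; the script's J_threshold is 0.3).

```python
A = frozenset({1, 2, 3, 4})
B = frozenset({2, 3, 4, 5})
print(Jaccard(A, B))   # |A ∩ B| / |A ∪ B| = 3 / 5 = 0.6
# 0.6 > 0.3, so B would be mapped to the same corpus id as A;
# a word whose best Jaccard score against every existing key is <= 0.3 gets a new id.
```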
#### File: master/Recommender_Labeller/mongoapi.py
```python
import pandas as pd
from pymongo import MongoClient
import dns
import json
path1='V:/SportsVU/rawsequences.json'
class mongo:
def connect(self):
print('connecting to mongo...')
# Connect to MongoDB
try:
self.client = MongoClient(
"mongodb+srv://GMAXD:[email protected]/dejaplaydb?retryWrites=true&w=majority")
self.db=self.client['dejaplaydb']
self.collection=self.db['nbasequences']
except Exception as e:
print('Exception', e)
return -1
print('connected to db...')
return self.collection.count_documents({})
def inserttodb(self, path):
with open(path) as f:
data = json.loads(f.read())
f.close()
data = pd.DataFrame(data)
try:
data.reset_index(inplace=True)
data_dict = data.to_dict("records")
# Insert collection
print("inserting records...")
self.client['dejaplaydb']['nbasequences'].insert_many(data_dict)
print('records inserted to collection')
except Exception as e:
print('Exception ',e)
def findseq_byid(self,seq_id, detaildflag):
try:
seq=self.collection.find_one({'index': str(seq_id - 1)})
print('record found...')
except Exception as e:
print('Exception ', e)
if (detaildflag == 1):
hometeam = seq['events']['home']['name']
awayteam = seq['events']['visitor']['name']
gameclock = seq['events']['moments'][0][2]
shotclock = seq['events']['moments'][0][3]
date = seq['gamedate']
quarter = seq['events']['moments'][0][0]
return({'seq_id': seq_id, 'hometeam':hometeam, 'awayteam': awayteam, 'gameclock':gameclock, 'shotclock':shotclock,
'matchdate':date, 'quarter':quarter})
else:
return seq
```
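A hedged usage sketch of the mongo wrapper above (requires network access to the Atlas cluster; the sequence id is arbitrary).

```python
db = mongo()
total = db.connect()               # document count on success, -1 on failure
if total > 0:
    print(db.findseq_byid(1, 1))   # summary dict for sequence 1 (detailed flag set)
    # db.inserttodb(path1)         # one-off bulk load of rawsequences.json
```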
|
{
"source": "jellymlg/Bagicoin",
"score": 2
}
|
#### File: functional/test_framework/messages.py
```python
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, x16_hash_block
BIP0031_VERSION = 60000
MY_VERSION = 70025 # This needs to match the ASSETDATA_VERSION in version.h!
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 bagi in Corbies
BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is BIP 125 opt-in and BIP 68-opt-out
NODE_NETWORK = (1 << 0)
NODE_WITNESS = (1 << 3)
NODE_UNSUPPORTED_SERVICE_BIT_5 = (1 << 5)
NODE_UNSUPPORTED_SERVICE_BIT_7 = (1 << 7)
MSG_WITNESS_FLAG = 1 << 30
# ===================================================
# Serialization/deserialization tools
# ===================================================
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(v):
if v < 253:
r = struct.pack("B", v)
elif v < 0x10000:
r = struct.pack("<BH", 253, v)
elif v < 0x100000000:
r = struct.pack("<BI", 254, v)
else:
r = struct.pack("<BQ", 255, v)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
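# Example round trip: ser_compact_size(300) -> b'\xfd\x2c\x01' (0xfd marker + uint16 little-endian);
# deser_compact_size(BytesIO(b'\xfd\x2c\x01')) -> 300.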
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for _ in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
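# Example: uint256_from_compact(0x207fffff) == 0x7fffff << 232
# (0x20 -> 32-byte size, 0x7fffff mantissa; a typical minimum-difficulty regtest-style target).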
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for _ in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(v, ser_function_name=None):
r = ser_compact_size(len(v))
for i in v:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for _ in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(v):
r = ser_compact_size(len(v))
for i in v:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for _ in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(v):
r = ser_compact_size(len(v))
for sv in v:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for _ in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(v):
r = ser_compact_size(len(v))
for i in v:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def from_hex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def to_hex(obj):
return bytes_to_hex_str(obj.serialize())
class CAddress:
"""
Objects that map to bagid objects, which can be serialized/deserialized
"""
__slots__ = ("ip", "nServices", "pchReserved", "port", "time")
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv:
__slots__ = ("hash", "type")
typemap = {
0: "Error",
1: "TX",
2: "Block",
1 | MSG_WITNESS_FLAG: "WitnessTx",
2 | MSG_WITNESS_FLAG: "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator:
__slots__ = ("nVersion", "vHave")
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint:
__slots__ = ("hash", "n")
def __init__(self, hash_in=0, n=0):
self.hash = hash_in
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn:
__slots__ = ("nSequence", "prevout", "scriptSig")
def __init__(self, outpoint=None, script_sig=b"", n_sequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = script_sig
self.nSequence = n_sequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut:
__slots__ = ("nValue", "scriptPubKey")
def __init__(self, n_value=0, script_pub_key=b""):
self.nValue = n_value
self.scriptPubKey = script_pub_key
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness:
__slots__ = "stack"
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CScriptReissue:
__slots__ = ("name", "amount", "reissuable", "ipfs_hash")
def __init__(self):
self.name = b""
self.amount = 0
self.reissuable = 1
self.ipfs_hash = b""
def deserialize(self, f):
self.name = deser_string(f)
self.amount = struct.unpack("<q", f.read(8))[0]
self.reissuable = struct.unpack("B", f.read(1))[0]
self.ipfs_hash = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.name)
r += struct.pack("<q", self.amount)
r += struct.pack("B", self.reissuable)
r += ser_string(self.ipfs_hash)
return r
class CScriptOwner:
__slots__ = "name"
def __init__(self):
self.name = b""
def deserialize(self, f):
self.name = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.name)
return r
class CScriptIssue:
__slots__ = ("name", "amount", "units", "reissuable", "has_ipfs", "ipfs_hash")
def __init__(self):
self.name = b""
self.amount = 0
self.units = 0
self.reissuable = 1
self.has_ipfs = 0
self.ipfs_hash = b""
def deserialize(self, f):
self.name = deser_string(f)
self.amount = struct.unpack("<q", f.read(8))[0]
self.units = struct.unpack("B", f.read(1))[0]
self.reissuable = struct.unpack("B", f.read(1))[0]
self.has_ipfs = struct.unpack("B", f.read(1))[0]
self.ipfs_hash = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.name)
r += struct.pack("<q", self.amount)
r += struct.pack("B", self.units)
r += struct.pack("B", self.reissuable)
r += struct.pack("B", self.has_ipfs)
r += ser_string(self.ipfs_hash)
return r
class CScriptTransfer:
__slots__ = ("name", "amount")
def __init__(self):
self.name = b""
self.amount = 0
def deserialize(self, f):
self.name = deser_string(f)
self.amount = struct.unpack("<q", f.read(8))[0]
def serialize(self):
r = b""
r += ser_string(self.name)
r += struct.pack("<q", self.amount)
return r
class CTxInWitness:
__slots__ = "scriptWitness"
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness:
__slots__ = "vtxinwit"
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction:
__slots__ = ("nVersion", "vin", "vout", "wit", "nLockTime", "x16r", "hash")
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.x16r = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.x16r = tx.x16r
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bagid
if flags != 0:
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for _ in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.x16r = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if len(self.wit.vtxinwit) != len(self.vin):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for _ in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is without witness -- must explicitly
# call serialize_with_witness to include witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.x16r = None
self.calc_x16r()
return self.hash
# We will only cache the serialization without witness in
# self.x16r and self.hash -- those are expected to be the txid.
def calc_x16r(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.x16r is None:
self.x16r = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_x16r()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader:
__slots__ = ("nVersion", "hashPrevBlock", "hashMerkleRoot", "nTime", "nBits", "nNonce", "x16r", "hash")
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.x16r = header.x16r
self.hash = header.hash
self.calc_x16r()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.x16r = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.x16r = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_x16r(self):
if self.x16r is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.hash = x16_hash_block(encode(r, 'hex_codec').decode('ascii'), "2")
self.x16r = int(self.hash, 16)
def rehash(self):
self.x16r = None
self.calc_x16r()
return self.x16r
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
__slots__ = "vtx"
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i + 1, len(hashes) - 1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_x16r()
hashes.append(ser_uint256(tx.x16r))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_x16r(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_x16r()
target = uint256_from_compact(self.nBits)
if self.x16r > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.x16r > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert:
__slots__ = ("nVersion", "nRelayUntil", "nExpiration", "nID", "nCancel", "setCancel", "nMinVer",
"nMaxVer", "setSubVer", "nPriority", "strComment", "strStatusBar", "strReserved")
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert:
__slots__ = ("vchMsg", "vchSig")
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction:
__slots__ = ("index", "tx")
def __init__(self, index=0, tx=None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=False):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs:
__slots__ = ("header", "nonce", "shortids_length", "shortids", "prefilled_txn_length", "prefilled_txn")
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for _ in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn)
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (
repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length,
repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
__slots__ = ()
def serialize(self, with_witness=True):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs:
__slots__ = ("header", "nonce", "shortids", "prefilled_txn", "use_witness")
def __init__(self, p2pheaders_and_shortids=None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [key0, key1]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list=None, use_witness=False):
if prefill_list is None:
prefill_list = [0]
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [PrefilledTransaction(i, block.vtx[i]) for i in prefill_list]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
                tx_hash = block.vtx[i].x16r
if use_witness:
tx_hash = block.vtx[i].calc_x16r(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (
repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest:
__slots__ = ("blockhash", "indexes")
def __init__(self, blockhash=0, indexes=None):
self.blockhash = blockhash
self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for _ in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x - last_index - 1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x + last_index + 1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions:
__slots__ = ("blockhash", "transactions")
def __init__(self, blockhash=0, transactions=None):
self.blockhash = blockhash
self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions)
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class MsgVersion:
__slots__ = ("addrFrom", "addrTo", "nNonce", "nRelay", "nServices", "nStartingHeight", "nTime",
"nVersion", "strSubVer")
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except struct.error:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class MsgVerack:
__slots__ = ()
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
@staticmethod
def serialize():
return b""
def __repr__(self):
return "msg_verack()"
class MsgAddr:
__slots__ = "addrs"
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class MsgAlert:
__slots__ = "alert"
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert),)
class MsgInv:
__slots__ = "inv"
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class MsgGetdata:
__slots__ = "inv"
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class MsgGetBlocks:
__slots__ = ("locator", "hashstop")
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class MsgTx:
__slots__ = "tx"
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class MsgWitnessTx(MsgTx):
__slots__ = "tx"
def serialize(self):
return self.tx.serialize_with_witness()
class MsgBlock:
__slots__ = "block"
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class MsgGeneric:
__slots__ = ("command", "data")
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class MsgWitnessBlock(MsgBlock):
__slots__ = "block"
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class MsgGetAddr:
__slots__ = ()
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
@staticmethod
def serialize():
return b""
def __repr__(self):
return "msg_getaddr()"
class MsgPingPreBip31:
__slots__ = ()
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
@staticmethod
def serialize():
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class MsgPing:
__slots__ = "nonce"
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class MsgPong:
__slots__ = "nonce"
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class MsgMempool:
__slots__ = ()
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
@staticmethod
def serialize():
return b""
def __repr__(self):
return "msg_mempool()"
class MsgNotFound:
__slots__ = "vec"
command = b"notfound"
def __init__(self, vec=None):
self.vec = vec or []
def deserialize(self, f):
self.vec = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.vec)
def __repr__(self):
return "msg_notfound(vec=%s)" % (repr(self.vec))
class MsgSendHeaders:
__slots__ = ()
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
@staticmethod
def serialize():
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class MsgGetHeaders:
__slots__ = ("locator", "hashstop")
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class MsgHeaders:
__slots__ = "headers"
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bagid indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class MsgReject:
__slots__ = ("message", "code", "reason", "data")
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class MsgFeeFilter:
__slots__ = "feerate"
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class MsgSendCmpct:
__slots__ = ("announce", "version")
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class MsgCmpctBlock:
__slots__ = "header_and_shortids"
command = b"cmpctblock"
def __init__(self, header_and_shortids=None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class MsgGetBlockTxn:
__slots__ = "block_txn_request"
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class MsgBlockTxn:
__slots__ = "block_transactions"
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class MsgWitnessBlocktxn(MsgBlockTxn):
__slots__ = "block_transactions"
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
```
|
{
"source": "jelly/osbuild",
"score": 2
}
|
#### File: osbuild/util/linux.py
```python
import array
import ctypes
import ctypes.util
import fcntl
import os
import platform
import threading
__all__ = [
"ioctl_get_immutable",
"ioctl_toggle_immutable",
]
# NOTE: These are wrong on at least ALPHA and SPARC. They use different
# ioctl number setups. We should fix this, but this is really awkward
# in standard python.
# Our tests will catch this, so we will not accidentally run into this
# on those architectures.
FS_IOC_GETFLAGS = 0x80086601
FS_IOC_SETFLAGS = 0x40086602
FS_IMMUTABLE_FL = 0x00000010
if platform.machine() == "ppc64le":
BLK_IOC_FLSBUF = 0x20001261
else:
BLK_IOC_FLSBUF = 0x00001261
def ioctl_get_immutable(fd: int):
"""Query FS_IMMUTABLE_FL
This queries the `FS_IMMUTABLE_FL` flag on a specified file.
Arguments
---------
fd
File-descriptor to operate on.
Returns
-------
bool
Whether the `FS_IMMUTABLE_FL` flag is set or not.
Raises
------
OSError
If the underlying ioctl fails, a matching `OSError` will be raised.
"""
if not isinstance(fd, int) or fd < 0:
raise ValueError()
flags = array.array('L', [0])
fcntl.ioctl(fd, FS_IOC_GETFLAGS, flags, True)
return bool(flags[0] & FS_IMMUTABLE_FL)
def ioctl_toggle_immutable(fd: int, set_to: bool):
"""Toggle FS_IMMUTABLE_FL
This toggles the `FS_IMMUTABLE_FL` flag on a specified file. It can both set
and clear the flag.
Arguments
---------
fd
File-descriptor to operate on.
set_to
Whether to set the `FS_IMMUTABLE_FL` flag or not.
Raises
------
OSError
If the underlying ioctl fails, a matching `OSError` will be raised.
"""
if not isinstance(fd, int) or fd < 0:
raise ValueError()
flags = array.array('L', [0])
fcntl.ioctl(fd, FS_IOC_GETFLAGS, flags, True)
if set_to:
flags[0] |= FS_IMMUTABLE_FL
else:
flags[0] &= ~FS_IMMUTABLE_FL
fcntl.ioctl(fd, FS_IOC_SETFLAGS, flags, False)
def ioctl_blockdev_flushbuf(fd: int):
"""Flush the block device buffer cache
NB: This function needs the `CAP_SYS_ADMIN` capability.
Arguments
---------
fd
File-descriptor of a block device to operate on.
Raises
------
OSError
If the underlying ioctl fails, a matching `OSError`
will be raised.
"""
if not isinstance(fd, int) or fd < 0:
raise ValueError(f"Invalid file descriptor: '{fd}'")
fcntl.ioctl(fd, BLK_IOC_FLSBUF, 0)
class LibCap:
"""Wrapper for libcap (capabilities commands and library) project"""
cap_value_t = ctypes.c_int
_lock = threading.Lock()
_inst = None
def __init__(self, lib: ctypes.CDLL) -> None:
self.lib = lib
# process-wide bounding set
get_bound = lib.cap_get_bound
get_bound.argtypes = (self.cap_value_t,)
get_bound.restype = ctypes.c_int
get_bound.errcheck = self._check_result
self._get_bound = get_bound
from_name = lib.cap_from_name
from_name.argtypes = (ctypes.c_char_p, ctypes.POINTER(self.cap_value_t),)
from_name.restype = ctypes.c_int
from_name.errcheck = self._check_result
self._from_name = from_name
to_name = lib.cap_to_name
to_name.argtypes = (ctypes.c_int,)
to_name.restype = ctypes.POINTER(ctypes.c_char)
to_name.errcheck = self._check_result
self._to_name = to_name
free = lib.cap_free
free.argtypes = (ctypes.c_void_p,)
free.restype = ctypes.c_int
free.errcheck = self._check_result
self._free = free
@staticmethod
def _check_result(result, func, args):
if result is None or (isinstance(result, int) and result == -1):
err = ctypes.get_errno()
msg = f"{func.__name__}{args} -> {result}: error ({err}): {os.strerror(err)}"
raise OSError(err, msg)
return result
@staticmethod
def make():
path = ctypes.util.find_library("cap")
if not path:
return None
try:
lib = ctypes.CDLL(path, use_errno=True)
except (OSError, ImportError):
return None
return LibCap(lib)
@staticmethod
def last_cap() -> int:
"""Return the int value of the highest valid capability"""
try:
with open("/proc/sys/kernel/cap_last_cap", "rb") as f:
data = f.read()
return int(data)
except FileNotFoundError:
return 0
@classmethod
def get_default(cls) -> "LibCap":
"""Return a singleton instance of the library"""
with cls._lock:
if cls._inst is None:
cls._inst = cls.make()
return cls._inst
def get_bound(self, capability: int) -> bool:
"""Return the current value of the capability in the thread's bounding set"""
# cap = self.cap_value_t(capability)
return self._get_bound(capability) == 1
def to_name(self, value: int) -> str:
"""Translate from the capability's integer value to the its symbolic name"""
raw = self._to_name(value)
val = ctypes.cast(raw, ctypes.c_char_p).value
res = str(val, encoding="utf-8")
self._free(raw)
return res.upper()
def from_name(self, value: str) -> int:
"""Translate from the symbolic name to its integer value"""
cap = self.cap_value_t()
self._from_name(value.encode("utf-8"), ctypes.pointer(cap))
return int(cap.value)
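# A minimal usage sketch for LibCap (illustrative only; assumes libcap is
# available on the system, and uses CAP_SYS_ADMIN, which is capability 21 on
# current Linux kernels):
#   lib = LibCap.get_default()
#   if lib:
#       value = lib.from_name("CAP_SYS_ADMIN")  # -> 21
#       name = lib.to_name(value)               # -> "CAP_SYS_ADMIN"
#       in_bounding_set = lib.get_bound(value)  # True while not dropped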
def cap_is_supported(capability: str = "CAP_CHOWN") -> bool:
"""Return whether a given capability is supported by the system"""
lib = LibCap.get_default()
if not lib:
return False
try:
value = lib.from_name(capability)
lib.get_bound(value)
return True
except OSError:
return False
def cap_bound_set() -> set:
"""Return the calling thread's capability bounding set
If capabilities are not supported this function will return the empty set.
"""
lib = LibCap.get_default()
if not lib:
return set()
res = set(
lib.to_name(cap)
for cap in range(lib.last_cap() + 1)
if lib.get_bound(cap)
)
return res
def cap_mask_to_set(mask: int) -> set:
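    """Translate an integer capability bitmask into a set of capability names.

    If capabilities are not supported this function returns the empty set.
    """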
lib = LibCap.get_default()
if not lib:
return set()
def bits(n):
count = 0
while n:
if n & 1:
yield count
count += 1
n >>= 1
res = {
lib.to_name(cap) for cap in bits(mask)
}
return res
```
#### File: test/mod/test_buildroot.py
```python
import pathlib
import os
import sys
from tempfile import TemporaryDirectory
import pytest
from osbuild.buildroot import BuildRoot
from osbuild.monitor import LogMonitor, NullMonitor
from osbuild.pipeline import detect_host_runner
from osbuild.util import linux
from ..test import TestBase
@pytest.fixture(name="tempdir")
def tempdir_fixture():
with TemporaryDirectory(prefix="lvm2-") as tmp:
yield tmp
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_basic(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
r = root.run(["/usr/bin/true"], monitor)
assert r.returncode == 0
# Test we can use `.run` multiple times
r = root.run(["/usr/bin/true"], monitor)
assert r.returncode == 0
r = root.run(["/usr/bin/false"], monitor)
assert r.returncode != 0
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_runner_fail(tempdir):
runner = "org.osbuild.nonexistantrunner"
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
logfile = os.path.join(tempdir, "log.txt")
with BuildRoot("/", runner, libdir, var) as root, \
open(logfile, "w") as log:
monitor = LogMonitor(log.fileno())
r = root.run(["/usr/bin/true"], monitor)
assert r.returncode == 1
with open(logfile) as f:
log = f.read()
assert log
assert r.output
assert log == r.output
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_output(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
data = "42. cats are superior to dogs"
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
r = root.run(["/usr/bin/echo", data], monitor)
assert r.returncode == 0
assert data in r.output.strip()
@pytest.mark.skipif(not TestBase.have_test_data(), reason="no test-data access")
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_bind_mounts(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
rw_data = pathlib.Path(tempdir, "data")
rw_data.mkdir()
scripts = os.path.join(TestBase.locate_test_data(), "scripts")
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
ro_binds = [f"{scripts}:/scripts"]
cmd = ["/scripts/mount_flags.py",
"/scripts",
"ro"]
r = root.run(cmd, monitor, readonly_binds=ro_binds)
assert r.returncode == 0
cmd = ["/scripts/mount_flags.py",
"/rw-data",
"ro"]
binds = [f"{rw_data}:/rw-data"]
r = root.run(cmd, monitor, binds=binds, readonly_binds=ro_binds)
assert r.returncode == 1
@pytest.mark.skipif(not TestBase.have_test_data(), reason="no test-data access")
@pytest.mark.skipif(not os.path.exists("/sys/fs/selinux"), reason="no SELinux")
def test_selinuxfs_ro(tempdir):
# /sys/fs/selinux must never be writable in the container
# because RPM and other tools must not assume the policy
# of the host is the valid policy
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
scripts = os.path.join(TestBase.locate_test_data(), "scripts")
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
ro_binds = [f"{scripts}:/scripts"]
cmd = ["/scripts/mount_flags.py",
"/sys/fs/selinux",
"ro"]
r = root.run(cmd, monitor, readonly_binds=ro_binds)
assert r.returncode == 0
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_proc_overrides(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
cmdline = "is-this-the-real-world"
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
root.proc.cmdline = cmdline
r = root.run(["cat", "/proc/cmdline"], monitor)
assert r.returncode == 0
assert cmdline in r.output.strip()
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_timeout(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
root.run(["/bin/sleep", "1"], monitor, timeout=2)
with pytest.raises(TimeoutError):
root.run(["/bin/sleep", "1"], monitor, timeout=0.1)
with pytest.raises(TimeoutError):
root.run(["/bin/sleep", "1"], monitor, timeout=0.1)
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_env_isolation(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
monitor = NullMonitor(sys.stderr.fileno())
ipc = pathlib.Path(tempdir, "ipc")
ipc.mkdir()
# Set some env variable to make sure it is not leaked into
# the container
os.environ["OSBUILD_TEST_ENV_ISOLATION"] = "42"
with BuildRoot("/", runner, libdir, var) as root:
cmd = ["/bin/sh", "-c", "/usr/bin/env > /ipc/env.txt"]
r = root.run(cmd, monitor, binds=[f"{ipc}:/ipc"])
assert r.returncode == 0
with open(os.path.join(ipc, "env.txt")) as f:
data = f.read().strip()
assert data
have = dict(map(lambda x: x.split("=", 1), data.split("\n")))
allowed = [
"_", # added by `env` itself
"container",
"LC_CTYPE",
"PATH",
"PWD",
"PYTHONPATH",
"PYTHONUNBUFFERED",
"SHLVL", # added by the shell wrapper
"TERM",
]
for k in have:
assert k in allowed
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_caps(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
ipc = pathlib.Path(tempdir, "ipc")
ipc.mkdir()
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
def run_and_get_caps():
cmd = ["/bin/sh", "-c", "cat /proc/self/status > /ipc/status"]
r = root.run(cmd, monitor, binds=[f"{ipc}:/ipc"])
assert r.returncode == 0
with open(os.path.join(ipc, "status"), encoding="utf-8") as f:
data = f.readlines()
assert data
print(data)
perm = list(filter(lambda x: x.startswith("CapEff"), data))
assert perm and len(perm) == 1
perm = perm[0]
perm = perm[7:].strip() # strip "CapEff"
print(perm)
caps = linux.cap_mask_to_set(int(perm, base=16))
return caps
# check case of `BuildRoot.caps` is `None`, i.e. don't drop capabilities,
# thus the effective capabilities should be the bounding set
assert root.caps is None
bound_set = linux.cap_bound_set()
caps = run_and_get_caps()
assert caps == bound_set
# drop everything but `CAP_SYS_ADMIN`
assert "CAP_SYS_ADMIN" in bound_set
enable = set(["CAP_SYS_ADMIN"])
disable = bound_set - enable
root.caps = enable
caps = run_and_get_caps()
for e in enable:
assert e in caps
for d in disable:
assert d not in caps
```
|
{
"source": "jelly/plugin.fosdem.org",
"score": 2
}
|
#### File: resources/lib/addon.py
```python
from datetime import datetime, timedelta
import routing
import xbmcaddon
from xbmcgui import Dialog, ListItem
from xbmcplugin import addDirectoryItem, addSortMethod, endOfDirectory, setContent, setResolvedUrl, SORT_METHOD_LABEL, SORT_METHOD_UNSORTED
from fosdem import fetch_xml, contains_videos
from utils import html_to_kodi
FORMAT_URL = 'https://fosdem.org/{}/schedule/xml'
FORMATS = ['mp4', 'webm']
START_YEAR = 2012
plugin = routing.Plugin() # pylint: disable=invalid-name
addon = xbmcaddon.Addon('plugin.video.fosdem') # pylint: disable=invalid-name
def years():
now = datetime.now()
year = now.year
# Determine if FOSDEM happened this year already
if now.month < 2 and now.day < 3:
year -= 1
return range(year, START_YEAR - 1, -1)
def get_format():
return FORMATS[addon.getSettingInt('format')]
def get_metadata(event):
track = event.find('track').text
subtitle = event.find('subtitle').text
plot = ''
abstract = event.find('abstract').text
if abstract:
abstract = html_to_kodi(abstract)
else:
abstract = ''
description = event.find('description').text or event.find('abstract').text
if description:
description = html_to_kodi(description)
else:
description = ''
person_items = event.findall('./persons/person')
persons = [p.text for p in person_items] if person_items is not None else []
if persons:
plot += '[COLOR=blue]Presenter:[/COLOR] ' + ', '.join(persons) + '\n'
attachments = event.findall('./attachments/attachment')
if attachments:
plot += '[COLOR=red]Slides available[/COLOR]\n'
if plot:
plot += '\n'
if abstract:
plot += '[I]' + abstract + '[/I]\n'
if description:
if abstract:
plot += '\n'
plot += description
return dict(
cast=persons,
genre=track,
plot=plot,
tagline=subtitle,
)
@plugin.route('/')
def main_menu():
for year in years():
year = str(year)
url = plugin.url_for(show_dir, subdir=year)
addDirectoryItem(plugin.handle, url, ListItem(year), True)
endOfDirectory(plugin.handle)
@plugin.route('/noop')
def noop():
"""The API interface to do nothing"""
endOfDirectory(plugin.handle)
@plugin.route('/dir/<path:subdir>')
def show_dir(subdir=''):
root = fetch_xml(subdir)
for day in root.findall('day'):
number = day.attrib['index']
date = day.attrib['date']
text = '[B]Day {number}[/B] ({date})'.format(number=number, date=date)
url = plugin.url_for(show_day, year=subdir, day=number)
addDirectoryItem(plugin.handle, url,
ListItem(text), True)
endOfDirectory(plugin.handle)
@plugin.route('/day/<year>/<day>')
def show_day(year, day):
exp = './day[@index="{day}"]/room'.format(day=day)
root = fetch_xml(year)
for room in root.findall(exp):
if not contains_videos(room.findall('./event/links/link')):
continue
room_name = room.attrib['name']
track = room.find('./event/track').text
text = '[B]{track}[/B] - {room_name}'.format(track=track, room_name=room_name)
url = plugin.url_for(show_room, year=year, day=day, room=room_name)
addDirectoryItem(plugin.handle, url,
ListItem(text), True)
addSortMethod(handle=plugin.handle, sortMethod=SORT_METHOD_LABEL)
endOfDirectory(plugin.handle)
@plugin.route('/room/<year>/<day>/<room>')
def show_room(day, year, room):
exp = './day[@index="{}"]/room[@name="{}"]/event'.format(day, room)
root = fetch_xml(year)
for event in root.findall(exp):
event_id = event.attrib['id']
title = event.find('title').text
duration = event.find('duration').text or '00:00'
if contains_videos(event.findall('./links/link')):
url = plugin.url_for(show_event,
year=year,
event_id=event_id)
playable = 'true'
stream = 'true'
# duration is formatted as 01:30
hour, minute = duration.split(':')
seconds = timedelta(hours=int(hour), minutes=int(minute)).total_seconds()
else:
url = plugin.url_for(noop)
title = '[COLOR=gray]{title}[/COLOR]'.format(title=title)
playable = 'false'
stream = 'false'
seconds = 0
item = ListItem(title)
item.setProperty('IsPlayable', playable)
item.setProperty('IsInternetStream', stream)
item.setInfo('video', get_metadata(event))
if seconds:
item.addStreamInfo('video', {
'duration': seconds
})
addDirectoryItem(plugin.handle, url, item, False)
setContent(plugin.handle, 'videos')
addSortMethod(handle=plugin.handle, sortMethod=SORT_METHOD_UNSORTED)
addSortMethod(handle=plugin.handle, sortMethod=SORT_METHOD_LABEL)
endOfDirectory(plugin.handle)
@plugin.route('/event/<year>/<event_id>')
def show_event(year, event_id):
root = fetch_xml(year)
event = root.find('.//event[@id="{}"]'.format(event_id))
videos = [link.attrib['href'] for link in event.findall('./links/link') if 'video.fosdem.org' in link.attrib['href']]
if not videos:
Dialog().ok('Error playing video', 'FOSDEM event {id} in {year} has no videos.'.format(id=event_id, year=year))
endOfDirectory(plugin.handle)
return
video_format = get_format()
urls = [video for video in videos if video.endswith(video_format)]
if urls:
url = urls[0]
else:
# Select a random video
url = videos[0]
title = event.find('title').text
item = ListItem(title, path=url)
item.setInfo('video', get_metadata(event))
setResolvedUrl(plugin.handle, True, item)
def run(argv):
"""Addon entry point from wrapper"""
plugin.run(argv)
```
#### File: plugin.fosdem.org/tests/test_routing.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import addon
xbmc = __import__('xbmc')
xbmcaddon = __import__('xbmcaddon')
xbmcgui = __import__('xbmcgui')
xbmcplugin = __import__('xbmcplugin')
plugin = addon.plugin
class TestRouting(unittest.TestCase):
"""TestCase class"""
def test_main_menu(self):
"""Main menu: /"""
addon.run(['plugin://plugin.video.fosdem', '0', ''])
self.assertEqual(plugin.url_for(addon.main_menu), 'plugin://plugin.video.fosdem/')
def test_show_dir(self):
"""Directory: /dir/2020"""
addon.run(['plugin://plugin.video.fosdem/dir/2020', '0', ''])
self.assertEqual(plugin.url_for(addon.show_dir, subdir='2020'), 'plugin://plugin.video.fosdem/dir/2020')
def test_show_day(self):
"""Day: /day/2020/1"""
addon.run(['plugin://plugin.video.fosdem/day/2020/1', '0', ''])
self.assertEqual(plugin.url_for(addon.show_day, year='2020', day='1'), 'plugin://plugin.video.fosdem/day/2020/1')
def test_show_room(self):
"""Room: /room/2020/1/Janson"""
addon.run(['plugin://plugin.video.fosdem/room/2020/1/Janson', '0', ''])
self.assertEqual(plugin.url_for(addon.show_room, year='2020', day='1', room='Janson'), 'plugin://plugin.video.fosdem/room/2020/1/Janson')
addon.run(['plugin://plugin.video.fosdem/room/2016/1/UB2.252A (Lameere)', '0', ''])
self.assertEqual(plugin.url_for(addon.show_room, year='2016', day='1', room='UB2.252A (Lameere)'),
'plugin://plugin.video.fosdem/room/2016/1/UB2.252A (Lameere)')
def test_show_event(self):
"""Event: /event/2020/9025"""
addon.run(['plugin://plugin.video.fosdem/event/2020/9025', '0', ''])
self.assertEqual(plugin.url_for(addon.show_event, year='2020', event_id='9025'), 'plugin://plugin.video.fosdem/event/2020/9025')
addon.run(['plugin://plugin.video.fosdem/event/2020/10715', '0', ''])
self.assertEqual(plugin.url_for(addon.show_event, year='2020', event_id='10715'), 'plugin://plugin.video.fosdem/event/2020/10715')
def test_show_event_fail(self):
"""Event: /event/2020/9604"""
addon.run(['plugin://plugin.video.fosdem/event/2020/9604', '0', ''])
self.assertEqual(plugin.url_for(addon.show_event, year='2020', event_id='9604'), 'plugin://plugin.video.fosdem/event/2020/9604')
```
#### File: plugin.fosdem.org/tests/xbmc.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
LOGLEVELS = ['Debug', 'Info', 'Notice', 'Warning', 'Error', 'Severe', 'Fatal', 'None']
LOGDEBUG = 0
LOGINFO = 1
LOGNOTICE = 2
LOGWARNING = 3
LOGERROR = 4
LOGSEVERE = 5
LOGFATAL = 6
LOGNONE = 7
def log(msg, level=0):
"""A reimplementation of the xbmc log() function"""
color1 = '\033[32;1m'
color2 = '\033[32;0m'
name = LOGLEVELS[level]
if level in (4, 5, 6, 7):
color1 = '\033[31;1m'
if level in (6, 7):
raise Exception(msg)
elif level in (2, 3):
color1 = '\033[33;1m'
elif level == 0:
color2 = '\033[30;1m'
print('{color1}{name}: {color2}{msg}\033[39;0m'.format(name=name, color1=color1, color2=color2, msg=msg))
```
|
{
"source": "jelly-prog/discordpy-startup",
"score": 3
}
|
#### File: jelly-prog/discordpy-startup/discordbot.py
```python
from discord.ext import commands
import os
import traceback
import discord
from grouping import MakeTeam
intents = discord.Intents.all()  # Create the default Intents object
bot = commands.Bot(command_prefix='/', intents=intents)
token = os.environ['DISCORD_BOT_TOKEN']
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, "original", error)
error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
# Split members into evenly sized teams
@bot.command()
async def team(ctx, specified_num=2):
make_team = MakeTeam()
remainder_flag = 'true'
msg = make_team.make_party_num(ctx, specified_num, remainder_flag)
await ctx.channel.send(msg)
# Split members into teams without evening out the member counts
@bot.command()
async def team_norem(ctx, specified_num=2):
make_team = MakeTeam()
msg = make_team.make_party_num(ctx, specified_num)
await ctx.channel.send(msg)
# Split members into teams of a specified size
@bot.command()
async def group(ctx, specified_num=1):
make_team = MakeTeam()
msg = make_team.make_specified_len(ctx, specified_num)
await ctx.channel.send(msg)
"""テストコマンド"""
@bot.command()
async def ping(ctx):
await ctx.send('pong')
bot.run(token)
```
|
{
"source": "jelly/prometheus-arch-exporter",
"score": 2
}
|
#### File: jelly/prometheus-arch-exporter/prometheus-arch-exporter.py
```python
import argparse
from os import symlink
from shutil import rmtree
from tempfile import mkdtemp
from subprocess import check_output
from wsgiref.simple_server import make_server
import requests
from prometheus_client import make_wsgi_app, Metric, REGISTRY
from pyalpm import sync_newversion, vercmp
from pycman.config import init_with_config_and_options, init_with_config
PORT = 9097
USER_AGENT = 'Prometheus-Arch-Exporter/1.0'
class PacmanConf:
dbpath = None
config = '/etc/pacman.conf'
root = None
gpgdir = None
arch = None
logfile = None
cachedir = None
debug = None
def checkupdates():
count = 0
options = PacmanConf()
tempdir = mkdtemp(dir='/tmp')
options.dbpath = tempdir
symlink('/var/lib/pacman/local', f'{tempdir}/local')
# Workaround for passing a different DBPath but with the system pacman.conf
handle = init_with_config_and_options(options)
for db in handle.get_syncdbs():
db.update(False)
db = handle.get_localdb()
for pkg in db.pkgcache:
if sync_newversion(pkg, handle.get_syncdbs()) is None:
continue
count += 1
rmtree(tempdir)
return count
def vulnerable_packages():
count = 0
handle = init_with_config('/etc/pacman.conf')
db = handle.get_localdb()
# XXX: error handling
user_agent = {'User-Agent': USER_AGENT}
r = requests.get('https://security.archlinux.org/issues.json', headers=user_agent)
advisories = r.json()
for adv in advisories:
version = adv['fixed']
packages = adv['packages']
if not version:
continue
if not any(db.get_pkg(pkg) for pkg in packages):
continue
for pkg in packages:
alpm_pkg = db.get_pkg(pkg)
if not alpm_pkg:
continue
if vercmp(version, alpm_pkg.version) > 0:
count += 1
return count
class ArchCollector(object):
def collect(self):
packages = checkupdates()
metric = Metric('arch_checkupdates', 'Arch Linux Packages out of date', 'gauge')
metric.add_sample('arch_checkupdates', value=(packages), labels={})
yield metric
        security_issues = vulnerable_packages()
metric = Metric('arch_audit', 'Arch Audit Packages', 'gauge')
metric.add_sample('arch_audit', value=(security_issues), labels={})
yield metric
def main():
parser = argparse.ArgumentParser(description='Arch Linux exporter for Prometheus')
parser.add_argument('-p', '--port', help=f'exporter exposed port (default {PORT})', type=int, default=PORT)
args = parser.parse_args()
REGISTRY.register(ArchCollector())
app = make_wsgi_app()
httpd = make_server('', args.port, app)
httpd.serve_forever()
if __name__ == "__main__":
main()
```
|
{
"source": "jelly/sha2017-badge-weather-forecast",
"score": 2
}
|
#### File: jelly/sha2017-badge-weather-forecast/service.py
```python
def setup():
import badge
import easydraw
import machine
easydraw.msg("Setting weather overview as boot app")
badge.nvs_set_str('boot', 'splash', 'WeekWeather')
machine.deepsleep(1)
```
|
{
"source": "jellysnake/docInspector",
"score": 3
}
|
#### File: DocInspector/Writers/OutputCsv.py
```python
from datetime import datetime, timezone
from typing import List
from DocInspector.DocStats import DocStats
def outputGenerals(stats: DocStats) -> List[str]:
"""
Create the general stats in a csv format
:param stats: The stats to convert
:return: The general stats in a csv format
"""
return ["General Stats",
f"Name, {stats.general.name}",
f"Creation Date, {stats.general.creationDate}",
f"Link, https://docs.google.com/document/d/{stats.general.id}/view",
""]
def outputIndividuals(stats: DocStats) -> List[str]:
"""
Create the individual stats table
:param stats: The source of the stats to use
    :return: The individual stats in a csv format
"""
# Create the header rows
output = ["Individual Stats"
"Name, Additions, Removals, Changes, Percent"]
# Create a row for each editor
for editorId in stats.individuals.getEditors():
editor = stats.individuals.getEditor(editorId)
output.append(f"{editor.name}, {editor.additions}, {editor.removals}, {editor.changes}, {editor.percent}")
return output + [""]
def outputTimeline(stats: DocStats) -> List[str]:
"""
Creates the timeline for all increments.
This does not cull increments where nothing happened.
:param stats: The stats to use for the timeline
:return: The timeline in csv format
"""
editorIds = stats.individuals.getEditors()
# Create the header rows
output = ["Timeline Stats"
f"Additions,{','*len(editorIds)},Removals"]
additionLine = "Date, "
removalLine = "Date, "
for editor in editorIds:
additionLine += stats.individuals.editors[editor].name + ","
removalLine += stats.individuals.editors[editor].name + ","
output.append(additionLine + "," + removalLine)
# Create the timeline rows
time = stats.timeline.timelineStart
for increment in stats.timeline.increments:
# Add the increment date
additionLine = datetime.fromtimestamp(time / 1000) \
.replace(tzinfo=timezone.utc) \
.astimezone(tz=None) \
.strftime('%d/%m/%Y - %I:%M:%S %p') \
+ ","
removalLine = str(additionLine) # We want a copy not the same
# Add each editor's additions
for editor in editorIds:
if editor in increment.editors:
additionLine += str(increment.editors[editor].additions or "") + ","
removalLine += str(increment.editors[editor].removals or "") + ","
else:
additionLine += ","
removalLine += ","
output.append(additionLine + "," + removalLine)
time += stats.timeline.incrementSize
return output
def outputCsv(stats: DocStats) -> str:
"""
Convert the stats provided into a csv representation of them.
:param stats: The stats to convert
:return: A string of all the stats in CSV format
"""
output = []
output.extend(outputGenerals(stats))
output.extend(outputIndividuals(stats))
output.extend(outputTimeline(stats))
return "\n".join(output)
```
|
{
"source": "jellysquider/magenta",
"score": 2
}
|
#### File: magenta/music/chords_encoder_decoder.py
```python
from magenta.music import chord_symbols_lib
from magenta.music import constants
from magenta.music import encoder_decoder
NOTES_PER_OCTAVE = constants.NOTES_PER_OCTAVE
NO_CHORD = constants.NO_CHORD
class ChordEncodingException(Exception):
pass
class MajorMinorChordOneHotEncoding(encoder_decoder.OneHotEncoding):
"""Encodes chords as root + major/minor, with zero index for "no chord".
Encodes chords as follows:
0: "no chord"
1-12: chords with a major triad, where 1 is C major, 2 is C# major, etc.
13-24: chords with a minor triad, where 13 is C minor, 14 is C# minor, etc.
"""
# Mapping from pitch class index to name. Eventually this should be defined
# more globally, but right now only `decode_chord` needs it.
_PITCH_CLASS_MAPPING = ['C', 'C#', 'D', 'E-', 'E', 'F',
'F#', 'G', 'A-', 'A', 'B-', 'B']
def __init__(self, chord_symbol_functions=
chord_symbols_lib.ChordSymbolFunctions.get()):
"""Initialize the MajorMinorChordOneHotEncoding object.
Args:
chord_symbol_functions: ChordSymbolFunctions object with which to perform
the actual transposition of chord symbol strings.
"""
self._chord_symbol_functions = chord_symbol_functions
@property
def num_classes(self):
return 2 * NOTES_PER_OCTAVE + 1
@property
def default_event(self):
return NO_CHORD
def encode_event(self, event):
if event == NO_CHORD:
return 0
root = self._chord_symbol_functions.chord_symbol_root(event)
quality = self._chord_symbol_functions.chord_symbol_quality(event)
if quality == chord_symbols_lib.CHORD_QUALITY_MAJOR:
return root + 1
elif quality == chord_symbols_lib.CHORD_QUALITY_MINOR:
return root + NOTES_PER_OCTAVE + 1
else:
raise ChordEncodingException('chord is neither major nor minor: %s'
% event)
def decode_event(self, index):
if index == 0:
return NO_CHORD
elif index - 1 < 12:
# major
return self._PITCH_CLASS_MAPPING[index - 1]
else:
# minor
return self._PITCH_CLASS_MAPPING[index - NOTES_PER_OCTAVE - 1] + 'm'
```
#### File: magenta/music/sequences_lib.py
```python
import collections
import copy
# internal imports
from magenta.music import constants
from magenta.protobuf import music_pb2
# Set the quantization cutoff.
# Note events before this cutoff are rounded down to nearest step. Notes
# above this cutoff are rounded up to nearest step. The cutoff is given as a
# fraction of a step.
# For example, with quantize_cutoff = 0.75 using 0-based indexing,
# if .75 < event <= 1.75, it will be quantized to step 1.
# If 1.75 < event <= 2.75 it will be quantized to step 2.
# A number close to 1.0 gives less wiggle room for notes that start early,
# and they will be snapped to the previous step.
QUANTIZE_CUTOFF = 0.5
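# With the 0.5 cutoff used here, quantization applies int(x + 0.5) to the
# floating-point step value: an event at 0.4 steps rounds down to step 0,
# while an event at 0.6 steps rounds up to step 1.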
# Shortcut to chord symbol text annotation type.
CHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL
class BadTimeSignatureException(Exception):
pass
class MultipleTimeSignatureException(Exception):
pass
class MultipleTempoException(Exception):
pass
class NegativeTimeException(Exception):
pass
def extract_subsequence(sequence, start_time, end_time):
"""Extracts a subsequence from a NoteSequence.
Notes starting before `start_time` are not included. Notes ending after
`end_time` are truncated.
Args:
sequence: The NoteSequence to extract a subsequence from.
start_time: The float time in seconds to start the subsequence.
end_time: The float time in seconds to end the subsequence.
Returns:
A new NoteSequence that is a subsequence of `sequence` in the specified time
range.
"""
subsequence = music_pb2.NoteSequence()
subsequence.CopyFrom(sequence)
del subsequence.notes[:]
for note in sequence.notes:
if note.start_time < start_time or note.start_time >= end_time:
continue
new_note = subsequence.notes.add()
new_note.CopyFrom(note)
new_note.end_time = min(note.end_time, end_time)
subsequence.total_time = min(sequence.total_time, end_time)
return subsequence
def is_power_of_2(x):
return x and not x & (x - 1)
class QuantizedSequence(object):
"""Holds notes and chords which have been quantized to time steps.
Notes contain a pitch, velocity, start time, and end time. Notes
are stored in tracks (which can be different instruments or the same
instrument). There is also a time signature and key signature.
Notes stored in this object are not guaranteed to be sorted by time.
Attributes:
tracks: A dictionary mapping track number to list of Note tuples. Track
number is taken from the instrument number of each NoteSequence note.
chords: A list of ChordSymbol tuples.
qpm: Quarters per minute. This is needed to recover tempo if converting back
to MIDI.
time_signature: This determines the length of a bar of music. This is just
needed to compute the number of quantization steps per bar, though it
can also communicate more high level aspects of the music
(see https://en.wikipedia.org/wiki/Time_signature).
steps_per_quarter: How many quantization steps per quarter note of music.
total_steps: The total number of steps in the quantized sequence.
"""
# Disabling pylint since it is recognizing these as attributes instead of
# classes.
# pylint: disable=invalid-name
Note = collections.namedtuple(
'Note',
['pitch', 'velocity', 'start', 'end', 'instrument', 'program', 'is_drum'])
TimeSignature = collections.namedtuple('TimeSignature',
['numerator', 'denominator'])
ChordSymbol = collections.namedtuple('ChordSymbol', ['step', 'figure'])
# pylint: enable=invalid-name
def __init__(self):
self._reset()
def _reset(self):
self.tracks = {}
self.chords = []
self.qpm = 120.0
self.time_signature = QuantizedSequence.TimeSignature(numerator=4,
denominator=4)
self.steps_per_quarter = 4
self.total_steps = 0
def steps_per_bar(self):
"""Calculates steps per bar.
Returns:
Steps per bar as a floating point number.
"""
quarters_per_beat = 4.0 / self.time_signature.denominator
quarters_per_bar = (quarters_per_beat * self.time_signature.numerator)
steps_per_bar_float = (self.steps_per_quarter * quarters_per_bar)
return steps_per_bar_float
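  # For example: with steps_per_quarter=4 and a 3/4 time signature,
  # quarters_per_beat = 4.0 / 4 = 1.0, quarters_per_bar = 3.0, and
  # steps_per_bar() returns 12.0.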
def from_note_sequence(self, note_sequence, steps_per_quarter):
"""Populate self with a music_pb2.NoteSequence proto.
Notes and time signature are saved to self with notes' start and end times
quantized. If there is no time signature 4/4 is assumed. If there is more
than one time signature an exception is raised.
The quarter notes per minute stored in `note_sequence` is used to normalize
tempo. Regardless of how fast or slow quarter notes are played, a note that
is played for 1 quarter note will last `steps_per_quarter` time steps in
the quantized result.
A note's start and end time are snapped to a nearby quantized step. See
the comments above `QUANTIZE_CUTOFF` for details.
Args:
note_sequence: A music_pb2.NoteSequence protocol buffer.
steps_per_quarter: Each quarter note of music will be divided into this
many quantized time steps.
Raises:
MultipleTimeSignatureException: If there is a change in time signature
in `note_sequence`.
MultipleTempoException: If there is a change in tempo in `note_sequence`.
BadTimeSignatureException: If the time signature found in `note_sequence`
has a denominator which is not a power of 2.
NegativeTimeException: If a note or chord occurs at a negative time.
"""
self._reset()
self.steps_per_quarter = steps_per_quarter
if note_sequence.time_signatures:
time_signatures = sorted(note_sequence.time_signatures,
key=lambda ts: ts.time)
# There is an implicit 4/4 time signature at 0 time. So if the first time
# signature is something other than 4/4 and it's at a time other than 0,
# that's an implicit time signature change.
if time_signatures[0].time != 0 and not (
time_signatures[0].numerator == 4 and
time_signatures[0].denominator == 4):
raise MultipleTimeSignatureException(
'NoteSequence has an implicit change from initial 4/4 time '
'signature.')
self.time_signature = QuantizedSequence.TimeSignature(
time_signatures[0].numerator, time_signatures[0].denominator)
for time_signature in time_signatures[1:]:
if (time_signature.numerator != self.time_signature.numerator or
time_signature.denominator != self.time_signature.denominator):
raise MultipleTimeSignatureException(
'NoteSequence has at least one time signature change.')
if not is_power_of_2(self.time_signature.denominator):
raise BadTimeSignatureException(
'Denominator is not a power of 2. Time signature: %d/%d' %
(self.time_signature.numerator, self.time_signature.denominator))
if note_sequence.tempos:
tempos = sorted(note_sequence.tempos, key=lambda t: t.time)
# There is an implicit 120.0 qpm tempo at 0 time. So if the first tempo is
# something other that 120.0 and it's at a time other than 0, that's an
# implicit tempo change.
if tempos[0].time != 0 and tempos[0].qpm != 120.0:
raise MultipleTempoException(
'NoteSequence has an implicit tempo change from initial 120.0 qpm')
self.qpm = tempos[0].qpm
for tempo in tempos[1:]:
if tempo.qpm != self.qpm:
raise MultipleTempoException(
'NoteSequence has at least one tempo change.')
else:
self.qpm = constants.DEFAULT_QUARTERS_PER_MINUTE
# Compute quantization steps per second.
steps_per_second = steps_per_quarter * self.qpm / 60.0
quantize = lambda x: int(x + (1 - QUANTIZE_CUTOFF))
self.total_steps = quantize(note_sequence.total_time * steps_per_second)
for note in note_sequence.notes:
# Quantize the start and end times of the note.
start_step = quantize(note.start_time * steps_per_second)
end_step = quantize(note.end_time * steps_per_second)
if end_step == start_step:
end_step += 1
# Do not allow notes to start or end in negative time.
if start_step < 0 or end_step < 0:
raise NegativeTimeException(
'Got negative note time: start_step = %s, end_step = %s' %
(start_step, end_step))
# Extend quantized sequence if necessary.
if end_step > self.total_steps:
self.total_steps = end_step
if note.instrument not in self.tracks:
self.tracks[note.instrument] = []
self.tracks[note.instrument].append(
QuantizedSequence.Note(pitch=note.pitch,
velocity=note.velocity,
start=start_step,
end=end_step,
instrument=note.instrument,
program=note.program,
is_drum=note.is_drum))
# Also add chord symbol annotations to the quantized sequence.
for annotation in note_sequence.text_annotations:
if annotation.annotation_type == CHORD_SYMBOL:
# Quantize the chord time, disallowing negative time.
step = quantize(annotation.time * steps_per_second)
if step < 0:
raise NegativeTimeException(
'Got negative chord time: step = %s' % step)
self.chords.append(
QuantizedSequence.ChordSymbol(step=step, figure=annotation.text))
def __eq__(self, other):
if not isinstance(other, QuantizedSequence):
return False
for track in self.tracks:
if (track not in other.tracks or
set(self.tracks[track]) != set(other.tracks[track])):
return False
return (
self.qpm == other.qpm and
self.time_signature == other.time_signature and
self.steps_per_quarter == other.steps_per_quarter and
self.total_steps == other.total_steps and
set(self.chords) == set(other.chords))
def __deepcopy__(self, unused_memo=None):
new_copy = type(self)()
new_copy.tracks = copy.deepcopy(self.tracks)
new_copy.chords = copy.deepcopy(self.chords)
new_copy.qpm = self.qpm
new_copy.time_signature = self.time_signature
new_copy.steps_per_quarter = self.steps_per_quarter
new_copy.total_steps = self.total_steps
return new_copy
```
|
{
"source": "Jellytrial/news_classification_webapp",
"score": 3
}
|
#### File: app/newsclassifier/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
from sklearn.externals import joblib
import geturldoc
import urllib
import sys
# Create your views here.
# make naivebayes classifier
nb = joblib.load('trained_nb.m')
def news_classification(request):
d = {
'url': request.GET.get('url'),
}
if d['url']:
try:
html_text = geturldoc.get_news_text(request.GET.get('url'))
# print(html_text)
d['category'] = nb.classifier(html_text)
            print('classification succeeded')
# score = news_classifier.score(html_text, d['category'])
# print(score)
except ValueError as instance:
print(instance, file=sys.stderr)
d['category'] = False
except urllib.error.HTTPError as instance:
print(instance, file=sys.stderr)
d['category'] = False
except urllib.error.URLError as instance:
print(instance, file=sys.stderr)
d['category'] = False
return render(request, 'index.html', d)
```
#### File: news_classification_webapp/app/traindata.py
```python
from bs4 import BeautifulSoup
from urllib import request
from nbclass import NaiveBayes
from sklearn.externals import joblib
from urllib.error import HTTPError, URLError
import csv
import time
def gunosy_category(model):
categories = {
'https://gunosy.com/categories/1': 'エンタメ',
'https://gunosy.com/categories/2': 'スポーツ',
'https://gunosy.com/categories/3': 'おもしろ',
'https://gunosy.com/categories/4': '国内',
'https://gunosy.com/categories/5': '海外',
'https://gunosy.com/categories/6': 'コラム',
'https://gunosy.com/categories/7': 'IT・科学',
'https://gunosy.com/categories/8': 'グルメ',
}
page_numb = 1
f1 = open('content.csv', 'w')
contentWriter = csv.writer(f1)
f2 = open('category', 'w')
categoryWriter = csv.writer(f2)
for url, name in categories.items():
print(url)
try:
category_html = request.urlopen(url)
except HTTPError as E:
print(E)
continue
try:
category_extract = BeautifulSoup(category_html.read(),
'html.parser')
except URLError as E:
print(E)
continue
for page_index in range(1, 21):
category_page_url = ["%s?page=%s" % (url, page_index)]
# print(category_page_url)
for page_url in category_page_url:
try:
page_html = request.urlopen(page_url)
except URLError as E:
# print('Page not found', E)
continue
try:
page_extract = BeautifulSoup(
page_html.read(), 'html.parser')
except URLError as E:
print(E)
continue
for index in range(0, 20):
try:
title = page_extract.find_all(
'div', {'class': 'list_title'})[index].a.get_text()
article_text = page_extract.find_all(
'div', {'class': 'list_lead'})[index].get_text()
sum_text = title + article_text
listdata1, listdata2 = [], []
listdata1.append(title)
listdata2.append(article_text)
contentWriter.writerow(listdata1 + listdata2)
listname = []
listname.append(name)
categoryWriter.writerow(listname)
except AttributeError:
continue
print('No.%s, extraction.train(%s, %s)' % (page_numb, sum_text,
name))
model.train(sum_text, name)
page_numb = page_numb + 1
time.sleep(1)
if __name__ == "__main__":
# get articles
nb = NaiveBayes()
gunosy_category(nb)
joblib.dump(nb, 'trained_nb.m')
```
|
{
"source": "JellyWang7/leetCodeDailyPractice",
"score": 4
}
|
#### File: leetCodeDailyPractice/array101/leetCodeDay1.py
```python
def countEvenDigitNumbers(nums):
evenCount = 0
for i in range(len(nums)):
tempHolder = nums[i] // 10
currentNumberDigit = 1
while tempHolder > 0:
currentNumberDigit += 1
tempHolder = tempHolder // 10
if currentNumberDigit % 2 == 0:
evenCount += 1
return evenCount
exampleList1 = [12,345,2,6,7896]
exampleList2 = [555,901,482,1771]
exampleList3 = [252]
print(countEvenDigitNumbers(exampleList1))
print(countEvenDigitNumbers(exampleList2))
print(countEvenDigitNumbers(exampleList3))
```
#### File: leetCodeDailyPractice/array101/leetCodeDay6.py
```python
def removeDuplicates(nums):
if len(nums) == 0:
return 0
compareIndex = 0
for i in range(len(nums)):
if nums[i] != nums[compareIndex]:
compareIndex += 1
nums[compareIndex] = nums[i]
nums = nums[:compareIndex+1]
print(nums)
return compareIndex + 1
nums = [1,1,2]
removeDuplicates(nums)
nums1 = [0,0,1,1,1,2,2,3,3,4]
removeDuplicates(nums1)
```
|
{
"source": "JellyWX/soundfx-web",
"score": 2
}
|
#### File: soundfx-web/app/routes.py
```python
from flask import redirect, render_template, request, url_for, session, jsonify, abort
from app import app, discord, db
from app.models import Sound, Favorites
def int_or_none(o):
try:
return int(o)
except:
return None
@app.errorhandler(500)
def internal_error(_error):
session.clear()
return "An error has occured! We've made a report, and cleared your cache on this website. " \
"If you encounter this error again, please send us a message on Discord!"
@app.route('/')
def index():
return redirect(url_for('help_page'))
@app.route('/help/')
def help_page():
return render_template('help.html', title='Help')
@app.route('/terms/')
def terms_page():
return render_template('terms.html', title='Terms of Service')
@app.route('/privacy/')
def privacy_page():
return render_template('privacy.html', title='Privacy Policy')
@app.route('/oauth/')
def oauth():
session.clear()
return redirect(url_for('discord.login'))
@app.route('/api/search/', methods=['GET'])
def search_sounds():
query = request.args.get('query') or ''
page = int_or_none(request.args.get('page')) or 0
sounds = Sound.query.filter((Sound.public == True) & (Sound.name.like('%{}%'.format(query)))) \
.order_by(Sound.name)
max_pages = sounds.count() // app.config['RESULTS_PER_PAGE']
sounds_slice = sounds.slice(page * app.config['RESULTS_PER_PAGE'], (page + 1) * app.config['RESULTS_PER_PAGE'])
return jsonify({'sounds': [sound.to_dict() for sound in sounds_slice], 'first_page': 0, 'last_page': max_pages})
@app.route('/api/favorites/', methods=['POST', 'DELETE', 'GET'])
def favorites():
user_id = session.get('user') or discord.get('api/users/@me').json().get('user')
if user_id is None:
abort(401)
else:
if request.method == 'GET':
user_favorites = db.session.query(Favorites).join(Sound).filter(Favorites.user_id == user_id)
return jsonify({'sounds': [Sound.query.get(fav.sound_id).to_dict() for fav in user_favorites]})
elif (sound_id := request.json.get('sound_id')) is not None:
if request.method == 'DELETE':
q = db.session.query(Favorites) \
.filter_by(user_id=int(user_id), sound_id=sound_id) \
.delete(synchronize_session='fetch')
db.session.commit()
return '', 201
else: # method is POST
f = db.session.query(Favorites) \
.filter(Favorites.user_id == user_id) \
.filter(Favorites.sound_id == sound_id)
if f.first() is None:
f = Favorites(user_id=user_id, sound_id=sound_id)
db.session.add(f)
db.session.commit()
return '', 201
else:
abort(400)
@app.route('/api/user_sounds/', methods=['GET', 'DELETE'])
def user_sounds():
user_id = session.get('user') or discord.get('api/users/@me').json().get('user')
if user_id is None:
abort(401)
else:
if request.method == 'DELETE':
if (sound_id := request.args.get('sound_id')) is not None:
Sound.query \
.filter(Sound.uploader_id == user_id) \
.filter(Sound.id == sound_id) \
.delete(synchronize_session='fetch')
db.session.commit()
return '', 201
else:
abort(400)
else:
sounds = Sound.query.filter(Sound.uploader_id == user_id)
return jsonify({'sounds': [sound.to_dict() for sound in sounds]})
@app.route('/api/sound/', methods=['GET'])
def get_sound():
if (sound_id := request.args.get('sound_id')) is not None:
try:
user_id = session.get('user') or discord.get('api/users/@me').json().get('user')
except:
user_id = None
sound = Sound.query.get(sound_id)
if sound is not None:
if sound.public or sound.uploader_id == user_id:
return jsonify(sound.to_full_dict())
else:
abort(403)
else:
abort(404)
else:
abort(400)
@app.route('/dashboard/')
def dashboard():
if not discord.authorized:
return redirect(url_for('oauth'))
user = discord.get('api/users/@me').json()
session['user'] = user['id']
return render_template('dashboard.html', title='Dashboard')
```
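A quick client-side sketch of how the JSON endpoints above could be exercised; the base URL, port, sound_id, and session cookie are assumptions for illustration only:
```python
import requests

BASE = 'http://localhost:5000'  # assumed local dev server

# /api/search/ takes 'query' and 'page' GET params and returns paged results.
resp = requests.get(f'{BASE}/api/search/', params={'query': 'airhorn', 'page': 0})
data = resp.json()
print(data['last_page'], len(data['sounds']))

# /api/favorites/ expects a JSON body with 'sound_id' for POST/DELETE and an
# authenticated session (cookie obtained via the Discord OAuth flow above).
requests.post(f'{BASE}/api/favorites/', json={'sound_id': 123},
              cookies={'session': '<session cookie>'})
```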
|
{
"source": "jellzilla/hypebot",
"score": 2
}
|
#### File: hypebot/commands/bling_commands.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hypebot import hype_types
from hypebot.commands import command_lib
from hypebot.core import util_lib
from hypebot.plugins import coin_lib
from hypebot.protos import channel_pb2
from hypebot.protos import user_pb2
from typing import List, Text, Tuple
@command_lib.CommandRegexParser(r'greet(?:ing)? ?(.*?)')
class GreetingPurchaseCommand(command_lib.BaseCommand):
"""Let you buy some welcome bling."""
@command_lib.HumansOnly()
def _Handle(self, channel: channel_pb2.Channel, user: user_pb2.User,
subcommand: Text) -> hype_types.CommandResponse:
greetings = self._UserGreetings(user)
subcommand = subcommand.lower()
str_range = (str(x) for x in range(len(greetings)))
if subcommand == 'list':
return self._HandleList(channel, user, greetings)
elif subcommand not in str_range:
return ('Please try again with your selection or try %sgreet list' %
self.command_prefix)
else:
selection = int(subcommand)
greeting_cost = greetings[selection][0]
if self._core.bank.ProcessPayment(user, coin_lib.FEE_ACCOUNT,
greeting_cost,
'Purchased greeting #%s' % selection,
self._Reply):
self._core.cached_store.SetValue(user, 'greetings',
greetings[selection][1])
def _UserGreetings(self,
unused_user: user_pb2.User) -> List[Tuple[int, Text]]:
"""Build list of potential greetings for the user.
Args:
unused_user: A placeholder for if someone wants to override this command
with a version that has user-specific greetings.
Returns:
List of tuples of prices / greetings that the user may purchase.
"""
return [
(1000, 'Hiya, {user}!'),
(5000, 'Who\'s afraid of the big bad wolf? Certainly not {user}!'),
(10000, 'All hail {user}!'),
(25000, 'Make way for the mighty {user}!'),
(100000,
'Wow {user}, you have {bal}, you must be fulfilled as a person!'),
]
@command_lib.LimitPublicLines(max_lines=0)
def _HandleList(self,
unused_channel: channel_pb2.Channel,
unused_user: user_pb2.User,
all_greetings: List[Tuple[int, Text]]
) -> hype_types.CommandResponse:
msgs = [
'You can purchase one of the following upgraded greetings from '
'%s' % self._core.name
]
for i, greeting in enumerate(all_greetings):
msgs.append(' %sgreet %d [%s] - \'%s\'' %
(self.command_prefix, i, util_lib.FormatHypecoins(
greeting[0]), greeting[1]))
return msgs
```
#### File: commands/league/lcs_commands.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import random
import threading
from absl import flags
import arrow
from hypebot import hype_types
from hypebot.commands import command_lib
from hypebot.core import inflect_lib
from hypebot.core import name_complete_lib
from hypebot.core import params_lib
from hypebot.data.league import messages
from hypebot.protos import channel_pb2
from hypebot.protos import message_pb2
from hypebot.protos import user_pb2
from typing import Optional, Text
LCS_TOPIC_STRING = u'#LcsHype | %s'
FLAGS = flags.FLAGS
flags.DEFINE_multi_string('spoiler_free_channels', ['#lol'], 'Channels where '
'LCS spoilers should be avoided')
@command_lib.CommandRegexParser(r'body(?: (?P<target_user>.+))?')
class BodyCommand(command_lib.BaseCommand):
"""Body by Jensen."""
DEFAULT_PARAMS = params_lib.MergeParams(
command_lib.BaseCommand.DEFAULT_PARAMS, {
'target_any': True,
})
def _Handle(
self, channel: channel_pb2.Channel, user: user_pb2.User,
target_user: Optional[user_pb2.User]) -> hype_types.CommandResponse:
if not target_user:
target_user = user_pb2.User(display_name='Jensen')
return u'Yo, %s, body these fools!' % target_user.display_name
@command_lib.CommandRegexParser(r'lcs-ch(a|u)mps (.+?)')
class LCSPlayerStatsCommand(command_lib.BaseCommand):
def _FormatChamp(self, champ):
"""Formats champ tuple to display name (wins-losses)."""
wins = champ[1].get('wins', 0)
losses = champ[1]['picks'] - wins
return '%s (%s-%s)' % (champ[0], wins, losses)
@command_lib.RequireReady('_core.esports')
def _Handle(self, channel: channel_pb2.Channel, user: user_pb2.User,
a_or_u: Text, player: Text) -> hype_types.CommandResponse:
serious_output = a_or_u == 'a'
# First, attempt to parse the query against the summoner tracker. If it
# matches a username, then use it. The summoner tracker internally queries
# Rito if it doesn't find a username, so we ignore those since LCS is on a
# separate server and we don't want name conflicts.
summoner = (self._core.summoner_tracker.ParseSummoner(
user, None, None, player) or [{}])[0]
if summoner.get('username'):
player = summoner['summoner']
player_name, player_data = self._core.esports.GetPlayerChampStats(player)
if summoner.get('username'):
player_name = '%s = %s' % (summoner['username'], player_name)
if not player_name:
return 'Unknown player. I can only give data about LCS players.'
elif not player_data or not player_data['champs']:
return '%s hasn\'t done much this split.' % player_name
best_champs = sorted(
player_data['champs'].items(),
key=lambda x: (x[1].get('wins', 0), -x[1]['picks']),
reverse=True)
if serious_output:
output = [
'%s:' % self._FormatChamp((player_name, player_data['num_games']))
]
output.extend(
['* %s' % self._FormatChamp(champ) for champ in best_champs[:5]])
return output
elif player_name == '<NAME>':
# Worst isn't the opposite order of best since more losses is worse than
# fewer wins.
worst_champ = sorted(
player_data['champs'].items(),
key=lambda x: (x[1]['picks'] - x[1].get('wins', 0), -x[1]['picks']),
reverse=True)[0]
return ('My {} is bad, my {} is worse; you guessed right, I\'m <NAME>'
.format(
self._FormatChamp(worst_champ),
'Azir' if user.user_id == 'koelze' else 'Ryze'))
else:
most_played_champ = sorted(
player_data['champs'].items(),
key=lambda x: (x[1]['picks'], x[1].get('wins', 0)),
reverse=True)[0]
return (
'My {} is fine, my {} is swell; you guessed right, I\'m {} stuck in '
'LCS hell').format(
self._FormatChamp(best_champs[0]),
self._FormatChamp(most_played_champ),
self._FormatChamp((player_name, player_data['num_games'])))
@command_lib.CommandRegexParser(r'lcs-link')
class LCSLivestreamLinkCommand(command_lib.BaseCommand):
def _Handle(self, channel: channel_pb2.Channel,
user: user_pb2.User) -> hype_types.CommandResponse:
livestream_links = self._core.esports.GetLivestreamLinks()
if livestream_links:
self._core.interface.Topic(
self._core.lcs_channel,
LCS_TOPIC_STRING % list(livestream_links.values())[0])
return ['Current LCS livestreams:'] + list(livestream_links.values())
else:
return ('I couldn\'t find any live LCS games, why don\'t you go play '
'outside?')
class LCSMatchNotificationCommand(command_lib.BaseCommand):
"""Sends a notification when matches are nearing scheduled start time."""
DEFAULT_PARAMS = params_lib.MergeParams(
command_lib.BaseCommand.DEFAULT_PARAMS,
{
# How soon before an LCS match to send a notification to subscribed
# channels.
'match_notification_sec': 15 * 60,
})
def __init__(self, *args):
super(LCSMatchNotificationCommand, self).__init__(*args)
self._core.esports.RegisterCallback(self._ScheduleAnnouncements)
self._lock = threading.Lock()
self._scheduled_announcements = []
def _ScheduleAnnouncements(self):
now = arrow.utcnow()
with self._lock:
# Clear pending announcements.
for job in self._scheduled_announcements:
self._core.scheduler.UnscheduleJob(job)
self._scheduled_announcements = []
for match in self._core.esports.schedule:
# TODO: Determine a good way to handle matches split across
# multiple days.
if match.announced:
continue
time_until_match = match.time - now
seconds_until_match = (
time_until_match.days * 86400 + time_until_match.seconds)
if seconds_until_match > 0:
self._scheduled_announcements.append(
self._core.scheduler.InSeconds(
seconds_until_match - self._params.match_notification_sec,
self._AnnounceMatch, match))
def _AnnounceMatch(self, match):
match.announced = True
topic = 'lcs_match'
if self._core.esports.brackets[match.bracket_id].is_playoffs:
topic = 'lcs_match_playoffs'
blue = self._core.esports.MatchTeamName(match.blue)
red = self._core.esports.MatchTeamName(match.red)
if blue and red:
match_name = '%s v %s' % (blue, red)
else:
match_name = 'An LCS match'
call_to_action_str = 'Get #Hyped!'
livestream_link = self._core.esports.GetLivestreamLinks().get(
match.match_id)
if livestream_link:
call_to_action_str = 'Watch at %s and get #Hyped!' % livestream_link
self._core.interface.Topic(self._core.lcs_channel,
LCS_TOPIC_STRING % livestream_link)
self._core.PublishMessage(
topic, u'%s is starting soon. %s' % (match_name, call_to_action_str))
@command_lib.CommandRegexParser(r'lcs-p(?:ick)?b(?:an)?-?(\w+)? (.+?) ?([v|^]?)'
)
class LCSPickBanRatesCommand(command_lib.BaseCommand):
"""Better stats than LCS production."""
def _PopulatePickBanChampStr(self, champ_str, champ, stats, subcommand,
num_games):
pb_info = {}
pb_info['champ'] = champ
pb_info['rate_str'] = subcommand[:-1].lower()
pb_info['appear_str'] = ''
if subcommand == 'all':
pb_info['appear_str'] = '{:4.3g}% pick+ban rate, '.format(
(stats['bans'] + stats['picks']) / num_games * 100)
# For 'all' we show both pick+ban rate and win rate
pb_info['rate_str'] = 'win'
per_subcommand_data = {
'ban': {
'rate': stats['bans'] / num_games * 100,
'stat': stats['bans'],
'stat_desc': 'ban',
'include_win_loss': False
},
'pick': {
'rate': stats['picks'] / num_games * 100,
'stat': stats['picks'],
'stat_desc': 'game',
'include_win_loss': True
},
'win': {
'rate':
0 if not stats['picks'] else stats['wins'] / stats['picks'] *
100,
'stat':
stats['picks'],
'stat_desc':
'game',
'include_win_loss':
True
}
}
pb_info.update(per_subcommand_data[pb_info['rate_str']])
pb_info['stat_str'] = inflect_lib.Plural(pb_info['stat'],
pb_info['stat_desc'])
pb_info['win_loss_str'] = ''
if pb_info['include_win_loss']:
pb_info['win_loss_str'] = ', %s-%s' % (stats['wins'],
stats['picks'] - stats['wins'])
return champ_str.format(**pb_info)
@command_lib.RequireReady('_core.esports')
def _Handle(self, channel, user, region, subcommand, order):
if region:
region = region.upper()
region_msg = 'in %s' % region
else:
region = 'all'
region_msg = 'across all LCS regions'
subcommand = subcommand.lower()
if subcommand == 'unique':
num_unique, num_games = self._core.esports.GetUniqueChampCount(region)
if num_games == 0:
return 'I don\'t have any data =(.'
avg_unique_per_game = num_games / num_unique
return ('There have been {} unique champs [1 every {:.1f} '
'games] picked or banned {}.').format(num_unique,
avg_unique_per_game,
region_msg)
elif subcommand in ('all', 'bans', 'picks', 'wins'):
specifier_to_sort_key_fn = {
'all': lambda stats: stats['picks'] + stats['bans'],
'bans': lambda stats: stats['bans'],
'picks': lambda stats: stats['picks'],
'wins': lambda stats: stats['wins'] / stats['picks'],
}
sort_key_fn = specifier_to_sort_key_fn[subcommand]
descending = order != '^'
order_str = 'Top' if descending else 'Bottom'
rate_str = subcommand[:-1].title()
if subcommand == 'all':
rate_str = 'Pick+Ban'
num_games, top_champs = self._core.esports.GetTopPickBanChamps(
region, sort_key_fn, descending)
min_game_str = inflect_lib.Plural(max(1, num_games / 20), 'game')
responses = [
'%s Champs by %s Rate %s [min %s].' %
(order_str, rate_str, region_msg, min_game_str)
]
max_champ_len = max(len(x[0]) for x in top_champs)
champ_str = ('{champ:%s} - {appear_str}{rate:4.3g}%% {rate_str} rate '
'({stat_str}{win_loss_str})' % max_champ_len)
for champ, stats in top_champs:
responses.append(
self._PopulatePickBanChampStr(champ_str, champ, stats, subcommand,
num_games))
return responses
canonical_name, pb_data = self._core.esports.GetChampPickBanRate(
region, subcommand)
if not canonical_name:
return ('While you may want {0} to be a real champ, your team doesn\'t '
'think {0} is a real champ.').format(subcommand)
if pb_data['num_games'] == 0 or ('picks' not in pb_data and
'bans' not in pb_data):
return '%s is not very popular %s.' % (canonical_name, region_msg)
appear_rate = (pb_data['bans'] + pb_data['picks']) / pb_data['num_games']
win_msg = ' with a {:.0%} win rate'
if pb_data['picks'] == 0:
win_msg = ''
else:
win_msg = win_msg.format(pb_data['wins'] / pb_data['picks'])
losses = pb_data['picks'] - pb_data['wins']
return '{} has appeared in {:.1%} of games ({}, {}){} ({}-{}) {}.'.format(
canonical_name, appear_rate, inflect_lib.Plural(pb_data['bans'], 'ban'),
inflect_lib.Plural(pb_data['picks'], 'pick'), win_msg, pb_data['wins'],
losses, region_msg)
@command_lib.CommandRegexParser(r'schedule(full)? ?(.*?)')
class LCSScheduleCommand(command_lib.BaseCommand):
DEFAULT_PARAMS = params_lib.MergeParams(
command_lib.BaseCommand.DEFAULT_PARAMS, {
'num_games': 5,
'full_num_games': 10,
})
@command_lib.RequireReady('_core.esports')
def _Handle(self, channel, user, full, subcommand):
include_playoffs = True
# Avoid spoilers in spoiler-free channels.
if channel.id in FLAGS.spoiler_free_channels:
include_playoffs = False
subcommand = subcommand.upper()
num_games = self._params.num_games
if full == 'full':
num_games = self._params.full_num_games
schedule, subcommand = self._core.esports.GetSchedule(
subcommand or 'All', include_playoffs, num_games)
lines = ['%s Upcoming Matches' % subcommand]
lines.extend(schedule)
# Print a disclaimer if we (potentially) omitted any matches.
if not include_playoffs and len(schedule) != num_games:
lines.append('(Note: Some matches may be omitted for spoiler reasons)')
return lines
@command_lib.CommandRegexParser(r'standings ?(.*?)')
class LCSStandingsCommand(command_lib.BaseCommand):
DEFAULT_PARAMS = params_lib.MergeParams(
command_lib.BaseCommand.DEFAULT_PARAMS, {
'default_region': 'NA',
})
@command_lib.RequireReady('_core.esports')
def _Handle(self, channel, user, query):
# Avoid spoilers in spoiler-free channels.
if channel.id in FLAGS.spoiler_free_channels:
return 'pls no spoilerino'
query = query.split()
league = query[0] if query else self._params.default_region
bracket = ' '.join(query[1:]) if len(query) > 1 else 'regular'
standings = self._core.esports.GetStandings(league, bracket)
cards = []
for standing in standings:
has_ties = any([team.ties for team in standing['teams']])
format_str = '{0.wins}-{0.losses}'
if has_ties:
format_str += '-{0.ties}, {0.points}'
card = message_pb2.Card(
header={
'title':
standing['league'].name,
'subtitle':
'%s (%s)' % (standing['bracket'].name,
'W-L-D, Pts' if has_ties else 'W-L'),
},
# We will place the top-n teams into the first field separated by
# newlines so that we don't have extra whitespace.
visible_fields_count=1)
team_strs = [
('*{0.rank}:* {0.team.abbreviation} (%s)' % format_str).format(team)
for team in standing['teams']
]
# If there are a lot of teams in the bracket, only display the top few.
# 6 is chosen since many group stages and Grumble consist of 6 team
# brackets.
if len(team_strs) > 6:
# The number placed into the visible field is n-1 so that we don't only
# show a single team in the collapsed section.
card.fields.add(text='\n'.join(team_strs[:5]))
card.fields.add(text='\n'.join(team_strs[5:]))
else:
card.fields.add(text='\n'.join(team_strs))
cards.append(card)
return cards
@command_lib.CommandRegexParser(r'results(full)? ?(.*?)')
class LCSResultsCommand(command_lib.BaseCommand):
DEFAULT_PARAMS = params_lib.MergeParams(
command_lib.BaseCommand.DEFAULT_PARAMS, {
'num_games': 5,
'full_num_games': 10,
})
@command_lib.RequireReady('_core.esports')
def _Handle(self, channel, user, full, region):
# Avoid spoilers in spoiler-free channels.
if channel.id in FLAGS.spoiler_free_channels:
return 'pls no spoilerino'
num_games = self._params.num_games
if full == 'full':
num_games = self._params.full_num_games
schedule, region = self._core.esports.GetResults(region or 'All', num_games)
schedule.insert(0, '%s Past Matches' % region)
return schedule
@command_lib.CommandRegexParser(r'roster(full)?(?:-(\w+))? (.+?)')
class LCSRosterCommand(command_lib.BaseCommand):
"""Display players and their roles."""
# A map of actual player names to what their name should be displayed as.
# You know, for memes.
NAME_SUBSTITUTIONS = {
'Revolta': 'Travolta',
'MikeYeung': 'Mike "<NAME>" Yeung',
}
@command_lib.RequireReady('_core.esports')
def _Handle(self, channel, user, include_subs, region, team):
teams = self._core.esports.teams
if region:
league = self._core.esports.leagues[region]
if not league:
return 'Unknown region'
teams = {team.team_id: team for team in league.teams}
teams = name_complete_lib.NameComplete(
dict({team.name: team_id for team_id, team in teams.items()}, **{
team.abbreviation: team_id for team_id, team in teams.items()
}), teams)
team = teams[team]
if not team:
return 'Unknown team.'
response = ['%s Roster:' % team.name]
players = [
player for player in team.players
if not player.is_substitute or include_subs
]
role_order = {'Top': 0, 'Jungle': 1, 'Mid': 2, 'Bottom': 3, 'Support': 4}
players.sort(key=lambda p: role_order.get(p.position, 5))
for player in players:
response.append('%s - %s' % (self.NAME_SUBSTITUTIONS.get(
player.summoner_name, player.summoner_name), player.position))
return response
@command_lib.CommandRegexParser(r'rooster(full)? (.+?)')
class LCSRoosterCommand(command_lib.BaseCommand):
def _Handle(self, channel, user, include_sub, team):
team = team.upper()
roles = ['Top', 'Jungle', 'Mid', 'ADC', 'Support']
if include_sub:
pos, role = random.choice(list(enumerate(roles)))
roles.insert(int(pos) + 1, '%s (Sub)' % role)
players = random.sample(messages.ROOSTERS, len(roles))
responses = ['%s Roosters:' % team]
for role, player in zip(roles, players):
responses.append('%s - %s' % (player, role))
return responses
```
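The column alignment in LCSPickBanRatesCommand comes from interpolating the longest champion name into the format spec before calling `.format()`; a standalone sketch of that padding trick with made-up data:
```python
# Hypothetical champ -> stats data, mirroring the shape used above.
top_champs = [('Aatrox', {'picks': 10, 'bans': 4, 'wins': 7}),
              ('Lee Sin', {'picks': 8, 'bans': 12, 'wins': 3})]
num_games = 40
max_champ_len = max(len(name) for name, _ in top_champs)
champ_str = '{champ:%s} - {rate:4.3g}%% pick rate ({picks} games)' % max_champ_len
for champ, stats in top_champs:
    print(champ_str.format(champ=champ,
                           rate=stats['picks'] / num_games * 100,
                           picks=stats['picks']))
# 'Aatrox' is padded to the width of 'Lee Sin', keeping the columns aligned.
```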
#### File: hypebot/core/activity_tracker.py
```python
import collections
import hashlib
import json
import threading
from typing import Dict, Text
from absl import logging
from hypebot.core import schedule_lib
from hypebot.protos import channel_pb2
from hypebot.protos import user_pb2
# TODO: Migrate util_lib.UserTracker behavior to ActivityTracker.
# TODO: Migrate BaseCommand._RateLimit tracking to ActivityTracker.
class ActivityTracker(object):
"""A class for tracking user activity."""
def __init__(self, scheduler: schedule_lib.HypeScheduler):
self._lock = threading.Lock()
self._ResetDelta()
# TODO: This will lose up to 30m of activity on restart.
scheduler.FixedRate(5, 30 * 60, self._LogAndResetDelta)
def RecordActivity(self, channel: channel_pb2.Channel, user: user_pb2.User,
command: Text):
"""Records that a user issued a command in a channel."""
with self._lock:
self._users[user.user_id] += 1
if channel.visibility == channel_pb2.Channel.PUBLIC:
self._public_channels[channel.id] += 1
elif channel.visibility == channel_pb2.Channel.PRIVATE:
self._private_channels[channel.id] += 1
elif channel.visibility == channel_pb2.Channel.SYSTEM:
self._system_callbacks[channel.id] += 1
else:
raise ValueError('Unknown channel_pb2.Channel visibility: %s' %
channel.visibility)
self._commands[command] += 1
def _ResetDelta(self):
self._commands = collections.defaultdict(lambda: 0) # type: Dict[Text, int]
self._users = collections.defaultdict(lambda: 0) # type: Dict[Text, int]
self._public_channels = collections.defaultdict(
lambda: 0) # type: Dict[Text, int]
self._private_channels = collections.defaultdict(
lambda: 0) # type: Dict[Text, int]
self._system_callbacks = collections.defaultdict(
lambda: 0) # type: Dict[Text, int]
def _LogAndResetDelta(self):
"""Logs the activity delta since the last call, and resets all counters."""
delta = None
with self._lock:
delta = {
'users': self._users,
'channels': {
'public': self._public_channels,
'private': self._private_channels,
'system': self._system_callbacks,
},
'commands': self._commands,
}
self._ResetDelta()
delta['users'] = _HashKeys(delta['users'])
delta['channels']['public'] = _HashKeys(delta['channels']['public'])
delta['channels']['private'] = _HashKeys(delta['channels']['private'])
delta['channels']['system'] = _HashKeys(delta['channels']['system'])
# TODO: Write to a structured logging service or a TSDB.
logging.info('Command deltas:\n%s', json.dumps(delta))
def _HashKeys(dictionary: Dict[Text, int]) -> Dict[Text, int]:
return {
hashlib.sha1(key.encode('utf-8')).hexdigest()[:8]: value
for (key, value) in dictionary.items()
}
```
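Only hashed identifiers ever reach the activity log; a tiny standalone check of the `_HashKeys` helper above (the counts are invented):
```python
import hashlib

def _HashKeys(dictionary):
  return {hashlib.sha1(key.encode('utf-8')).hexdigest()[:8]: value
          for (key, value) in dictionary.items()}

# Only truncated SHA-1 digests of the user/channel ids end up in the log line.
print(_HashKeys({'some_user_id': 3, '#some-channel': 1}))
```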
#### File: hypebot/core/inflect_lib.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import inflection
from typing import AnyStr
def AddIndefiniteArticle(noun: AnyStr) -> AnyStr:
"""Formats a noun with an appropriate indefinite article.
  Does not handle advanced cases, e.g. "hour" or other soft consonants.
Args:
noun: A string representing a noun.
Returns:
A string containing noun prefixed with an indefinite article, e.g.,
"a thing" or "an object".
"""
  if noun[0].lower() in ('a', 'e', 'i', 'o', 'u'):
return 'an ' + noun
else:
return 'a ' + noun
def Ordinalize(number: int) -> AnyStr:
"""Converts an int into the ordinal string representation.
Args:
number: Number to convert.
Returns:
Ordinal representation. E.g., 1st, 2nd, 3rd.
"""
if 10 < number < 20:
return 'th'
elif number % 10 == 1:
return 'st'
elif number % 10 == 2:
return 'nd'
elif number % 10 == 3:
return 'rd'
else:
return 'th'
def Plural(quantity: int, noun: AnyStr, plural: AnyStr = None) -> AnyStr:
"""Formats a quanity of a noun correctly.
Args:
quantity: Amount of noun to format.
noun: Singular form of noun to format.
plural: Optional plural form of noun if it is too special for us to handle
with normal English rules.
Returns:
Quantity of noun: e.g., 0 houses, 1 house, 2 houses.
"""
if quantity != 1:
noun = plural if plural else inflection.pluralize(noun)
return '%d %s' % (quantity, noun)
```
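Assuming the hypebot package (and its `inflection` dependency) is importable, the helpers above behave like this:
```python
from hypebot.core import inflect_lib

print(inflect_lib.AddIndefiniteArticle('object'))  # 'an object'
print(inflect_lib.AddIndefiniteArticle('thing'))   # 'a thing'
print(inflect_lib.Plural(0, 'house'))              # '0 houses'
print(inflect_lib.Plural(1, 'house'))              # '1 house'
print(inflect_lib.Ordinalize(3))                   # 'rd' (suffix only)
print(inflect_lib.Ordinalize(12))                  # 'th'
```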
#### File: hypebot/interfaces/interface_lib.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
from absl import logging
from hypebot import hype_types
from hypebot.core import params_lib
from hypebot.protos import user_pb2
from six import with_metaclass
from typing import Optional, Text
class BaseChatInterface(with_metaclass(abc.ABCMeta)):
"""The interface base class.
An `interface` allows hypebot to communicate with a chat application (e.g.,
IRC, Discord, FireChat). This is an application-agnostic way of sending and
receiving messages and information about users.
"""
DEFAULT_PARAMS = params_lib.HypeParams({
# Display name used when chatting.
'name': 'chatbot',
})
def __init__(self, params):
self._params = params_lib.HypeParams(self.DEFAULT_PARAMS)
self._params.Override(params)
self._params.Lock()
self._channels = set()
def RegisterHandlers(self, on_message_fn, user_tracker, user_prefs):
"""Register handlers from the bot onto the interface.
Allows the interface to communicate asynchronously to the bot when messages
or user information comes.
Args:
on_message_fn: {callable(Channel, User, message)} Function that will be
called in response to an incoming message.
user_tracker: {UserTracker} Where to store results of Who/WhoAll requests.
user_prefs: {SyncedDict} Persistent user preferences.
"""
self._channels = set()
self._on_message_fn = on_message_fn
self._user_tracker = user_tracker
self._user_prefs = user_prefs
def Join(self, channel: hype_types.Channel):
"""Bring the power of hype to the desired channel.
The base class only maintains a list of active channels. Subclasses are
responsible for actually joining the channel.
Args:
channel: {Channel} channel name to join.
"""
self._channels.add(channel.id)
def Leave(self, channel: hype_types.Channel):
"""We do not condone this behavior.
The base class only maintains a list of active channels. Subclasses are
responsible for actually leaving the channel.
Args:
channel: {Channel} channel to leave.
"""
if channel.id in self._channels:
self._channels.remove(channel.id)
else:
logging.warning('Tried to leave channel that I never joined: %s', channel)
@abc.abstractmethod
def Loop(self):
"""Listen to messages from the chat application indefinitely.
Loop steals the current thread.
"""
raise NotImplementedError()
def FindUser(self, query: Text) -> Optional[user_pb2.User]:
"""Find user with the given name or user_id.
Attempts to find a user proto for the given query. Some interfaces provide
an annotation syntax to allow specifying a specific user. Since these aren't
universal, the Interface will convert it into the user_id for the command.
However, we would also like to support referring to a user by their display
name directly. If specifying the display name, it is possible for it not to
be unique.
Args:
query: Either user_id or display name of user.
Returns:
The full user proto of the desired user or None if no user exists or the
query does not resolve to a unique user.
"""
users = self._user_tracker.AllUsers()
matches = []
for user in users:
if user.user_id == query:
return user
if user.display_name.lower() == query.lower():
matches.append(user)
if len(matches) == 1:
return matches[0]
return None
@abc.abstractmethod
def WhoAll(self):
"""Request that all users be added to the user tracker."""
raise NotImplementedError()
# TODO: Eliminate Optional from the message type.
@abc.abstractmethod
def SendMessage(self, channel: hype_types.Channel,
message: Optional[hype_types.Message]):
"""Send a message to the given channel.
Args:
channel: channel to receive message.
message: message to send to the channel.
"""
raise NotImplementedError()
@abc.abstractmethod
def SendDirectMessage(self, user: user_pb2.User, message: hype_types.Message):
raise NotImplementedError()
# TODO: Eliminate Optional from the message type.
@abc.abstractmethod
def Notice(self, channel: hype_types.Channel, message: hype_types.Message):
"""Send a notice to the channel.
Some applications (IRC) support a different type of message to a channel.
This is used to broadcast a message not in response to a user input. E.g.,
match start time or scheduled bet resolution.
Args:
channel: channel to send notice.
message: notice to send to the channel.
"""
raise NotImplementedError()
@abc.abstractmethod
def Topic(self, channel: hype_types.Channel, new_topic: Text):
"""Changes the "topic" of channel to new_topic.
Args:
channel: channel to change the topic of.
new_topic: new topic to set.
"""
raise NotImplementedError()
```
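A minimal sketch of a concrete interface built on the base class above, e.g. one that just prints to stdout; the method surface comes from the abstract methods, everything else here is an assumption:
```python
from hypebot.interfaces import interface_lib

class ConsoleInterface(interface_lib.BaseChatInterface):
  """Toy interface that echoes everything to stdout (sketch only)."""

  def Loop(self):
    # A real interface would block here reading from its chat backend and call
    # self._on_message_fn(channel, user, message) for each incoming message.
    raise NotImplementedError('Sketch only; wire up real input handling here.')

  def WhoAll(self):
    pass  # Nothing to enumerate for a single-user console.

  def SendMessage(self, channel, message):
    print('[%s] %s' % (channel.id, message))

  def SendDirectMessage(self, user, message):
    print('[DM to %s] %s' % (user.display_name, message))

  def Notice(self, channel, message):
    print('[NOTICE %s] %s' % (channel.id, message))

  def Topic(self, channel, new_topic):
    print('[TOPIC %s] %s' % (channel.id, new_topic))
```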
#### File: hypebot/plugins/coin_lib.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import math
import numbers
import random
import re
import threading
from absl import logging
from hypebot.core import schedule_lib
from hypebot.core import util_lib
from hypebot.data import messages
from hypebot.protos import bank_pb2
from hypebot.protos import bet_pb2
from hypebot.protos import user_pb2
import six
# pylint: disable=line-too-long
# pylint: enable=line-too-long
from google.protobuf import json_format
# "Accounts" where various transactions end up
BOOKIE_ACCOUNT = user_pb2.User(user_id='_hypebank', display_name='HypeBank')
FEE_ACCOUNT = BOOKIE_ACCOUNT
MINT_ACCOUNT = BOOKIE_ACCOUNT
SCHOLARSHIP_ACCOUNT = user_pb2.User(
user_id='_hypescholarship', display_name='HypeScholarship')
SUBSCRIPTION_ACCOUNT = BOOKIE_ACCOUNT
# pyformat: disable
HYPECENTS = frozenset([
BOOKIE_ACCOUNT.user_id,
FEE_ACCOUNT.user_id,
MINT_ACCOUNT.user_id,
SCHOLARSHIP_ACCOUNT.user_id,
SUBSCRIPTION_ACCOUNT.user_id,
])
# pyformat: enable
class Thievery(object):
"""Allows nefarious behavior.
The more you steal, the more you get caught. The more you are a victim, the
more you catch peeps.
We keep a score which is an exponential decay of the sum of past successful
  theft amounts for victims and thieves. Your percent of the total score impacts
future theft chances. Hypebot has a fixed large number in each pool to prevent
solitary thefts from overloading the system. Periodically, all scores are
reduced except hypebot's.
"""
# Rate to decay scores. I.e., score_t+1 = score_t * DECAY_RATE
_DECAY_RATE = 0.75
# Arrow object specifying when decay should occur.
_DECAY_TIME = util_lib.ArrowTime(2)
# Baseline percentage of victim balance that can be stolen half of the time.
_BASE_BALANCE_PERCENT = 0.02
# Fixed thief / victim score for hypebot.
_HYPEBOT_SCORE = 1000
def __init__(self, store, bank, bot_name, timezone):
self._store = store
self._bank = bank
self._bot_name = bot_name
self._protected_peeps = [self._bot_name] + list(HYPECENTS)
self._scheduler = schedule_lib.HypeScheduler(timezone)
self._scheduler.DailyCallback(
# Ensures we schedule this event at 2am local time instead of UTC.
self._DECAY_TIME.to(timezone),
self._store.RunInTransaction,
self._DecayAllScores)
def Rob(self, thief, victim, amount, msg_fn):
"""Attempt a robbery."""
if amount < 0:
msg_fn(None, 'Did you mean !hc gift?')
return
if victim.user_id in self._protected_peeps:
msg_fn(None, 'The Godfather protects his family.')
self._bank.ProcessPayment(
thief,
user_pb2.User(user_id=self._bot_name, display_name=self._bot_name),
500, 'In Soviet Russia, %s steals from you.' % self._bot_name, msg_fn)
return
victim_balance = self._bank.GetBalance(victim)
if victim_balance <= 0:
msg_fn(None, 'You cannot milk a dead cow.')
return
thief_alert = self._GetPDF('thief')[thief.user_id]
victim_alert = self._GetPDF('victim')[victim.user_id]
offset = self._BASE_BALANCE_PERCENT * (1 - thief_alert - victim_alert)
failure_chance = self._Sigmoid(amount / victim_balance, offset)
rob_attempt_score = random.random()
logging.info('(%s: %0.2f, %s: %0.2f) %s of %s attempt %0.2f >? %0.2f',
thief, thief_alert, victim, victim_alert, amount,
victim_balance, rob_attempt_score, failure_chance)
if rob_attempt_score < failure_chance:
self._bank.ProcessPayment(thief, SCHOLARSHIP_ACCOUNT,
min(self._bank.GetBalance(thief), amount),
'Victim scholarship fund', msg_fn)
self._DistributeToPastVictims(msg_fn)
if (rob_attempt_score < failure_chance * thief_alert /
(thief_alert + victim_alert + 1e-6)):
msg_fn(None, '%s is a known thief and was caught.' % thief.display_name)
else:
msg_fn(
None, '%s is on high alert and caught %s.' %
(victim.display_name, thief.display_name))
return
# TODO: Fold ProcessPayment into the UpdateScores tx.
# We don't worry about the victim having insufficient funds since there is a
# 0% chance of stealing 100% of someone's money.
if self._bank.ProcessPayment(victim, thief, amount, 'Highway robbery',
msg_fn):
self._store.RunInTransaction(self._UpdateScores, thief, victim, amount)
formatted_amount = util_lib.FormatHypecoins(amount)
msg_fn(
None, '%s stole %s from %s' %
(thief.display_name, formatted_amount, victim.display_name))
# We privmsg the victim to make sure they know who stole their hypecoins.
msg_fn(
victim, 'You\'ve been robbed! %s stole %s' %
(thief.display_name, formatted_amount))
def _Sigmoid(self, value, offset, scale=200.0):
return 1 / (1 + math.exp(-scale * (value - offset)))
def _GetScores(self, collection, tx=None):
"""Gets scores for collection.
Args:
collection: {string} which set of scores to get.
tx: {storage_lib.HypeTransaction} an optional transaction to pass along to
GetJsonValue.
Returns:
{dict<string, float>} scores keyed by name.
"""
scores = self._store.GetJsonValue(self._bot_name, 'scores:%s' % collection,
tx)
return collections.defaultdict(
int, scores or {self._bot_name: self._HYPEBOT_SCORE})
def _GetPDF(self, collection):
"""Gets probability density function of scores for collection."""
scores = self._GetScores(collection)
total_score = sum(scores.values())
pdf = {peep: score / total_score for peep, score in scores.items()}
return collections.defaultdict(float, pdf)
def _AddToScore(self, collection, name, amount, tx=None):
"""Add {amount} to {names}'s score in {collection}."""
scores = self._GetScores(collection, tx)
scores[name] += amount
logging.info('Updating %s scores: %s', collection, scores)
self._store.SetJsonValue(self._bot_name, 'scores:%s' % collection, scores,
tx)
def _UpdateScores(self, thief, victim, amount, tx=None):
self._AddToScore('thief', thief.user_id, amount, tx)
self._AddToScore('victim', victim.user_id, amount, tx)
return True
def _DecayAllScores(self, tx=None):
self._DecayScores('thief', tx)
self._DecayScores('victim', tx)
return True
def _DecayScores(self, collection, tx=None):
"""Decay scores for {collection}."""
scores = {
peep: int(score * self._DECAY_RATE)
for peep, score in self._GetScores(collection, tx).items()
if score > 0
}
scores[self._bot_name] = self._HYPEBOT_SCORE
logging.info('Updating %s scores: %s', collection, scores)
self._store.SetJsonValue(self._bot_name, 'scores:%s' % collection, scores,
tx)
def _DistributeToPastVictims(self, msg_fn):
"""Distribute funds in scholarship account to past victims."""
victim_scores = self._GetPDF('victim')
scholarship_balance = self._bank.GetBalance(SCHOLARSHIP_ACCOUNT)
self._bank.ProcessPayment(
SCHOLARSHIP_ACCOUNT,
[user_pb2.User(user_id=v) for v in victim_scores.keys()],
scholarship_balance,
'Victim scholarship fund',
msg_fn,
merchant_weights=victim_scores.values())
class Bookie(object):
"""Class for managing a betting ledger.
The data-model used by Bookie is rows mapping to dicts serialized as strings.
"""
_BET_SUBKEY = 'bets'
_ledger_lock = threading.RLock()
def __init__(self, store, bank, inventory):
self._store = store
self._bank = bank
self._inventory = inventory
def LookupBets(self, game, user: user_pb2.User = None, resolver=None):
"""Returns bets for game, optionally filtered by user or resolver."""
with self._ledger_lock:
bets = self._GetBets(game)
# Filtering is done slightly strangely, but it ensures that the same
# structure is kept regardless of filtering and that if a filter was given
# but the game has no matches for that filter, we return an empty dict
if user:
user_id = user.user_id
bets = {user_id: bets[user_id]} if user_id in bets else {}
if resolver:
bets = {
user_id: [bet for bet in user_bets if bet.resolver == resolver
] for user_id, user_bets in bets.items()
}
bets = collections.defaultdict(list, bets)
return bets
# TODO: PlaceBet needs to be fixed to throw on error.
def PlaceBet(self, game, bet, msg_fn, more=False):
"""Places a bet for game on behalf of user.
PlaceBet will withdraw funds from the bank to fund the bet.
Args:
game: The game this bet is for.
bet: Bet proto describing what bet to place.
msg_fn: {callable(channel, msg)} function to send messages.
more: A boolean that decides if the bet amount should be added to any
current bets.
Returns:
{boolean} whether bet placing was successful or not.
"""
return self._store.RunInTransaction(self._PlaceBet, game, bet, more, msg_fn)
def _PlaceBet(self, game, bet, more, msg_fn, *unused_args, **kwargs):
"""Internal version of PlaceBet to be run with a transaction."""
bet.game = game.name
with self._ledger_lock:
tx = kwargs.get('tx')
if not tx:
logging.error('_PlaceBet can only be called with a transaction.')
return
bets = self._GetBets(game.name, tx=tx)
prior_bet = None
for b in bets[bet.user.user_id]:
if bet.target == b.target:
prior_bet = b
logging.info('%s has a prior_bet for %s:%s => %s', bet.user,
game.name, bet.target, prior_bet)
break
if more and prior_bet:
bet.amount += prior_bet.amount
# Special handling to ensure we don't go overboard for lottery.
if game.name == 'lottery':
bet.amount = game.CapBet(bet.user, bet.amount, bet.resolver)
net_amount = bet.amount - (prior_bet.amount if prior_bet else 0)
if net_amount < 0:
msg_fn(bet.user,
'Money on the table is not yours. Try a higher amount.')
return False
if prior_bet:
details = 'Bet updated. Replaced %s with %s' % (
game.FormatBet(prior_bet), game.FormatBet(bet))
else:
details = 'Bet placed. %s' % game.FormatBet(bet)
if not self._bank.ProcessPayment(bet.user, BOOKIE_ACCOUNT, net_amount,
details, msg_fn):
return False
# We do this after the payment processing so that we don't delete bets if
# we can't correctly update them
if prior_bet:
bets[bet.user.user_id].remove(prior_bet)
bets[bet.user.user_id].append(bet)
self._SetBets(game.name, bets, tx=tx)
return True
def SettleBets(self, game, resolver, msg_fn, *args, **kwargs):
"""Settles all bets for game, clearing the ledger and paying out winnings.
Args:
game: The game to settle bets for.
resolver: The bot trying to settle bets. Used to filter out bets placed by
other bots which this bot shouldn't resolve.
msg_fn: {callable(channel, msg)} function to send user messages.
*args: Additional positional arguments to pass to settlement_fn.
**kwargs: Additional keyword arguments to pass to settlement_fn.
Returns:
List of messages to send as notifications of settling bets.
"""
return self._store.RunInTransaction(self._SettleBets, game, resolver,
msg_fn, *args, **kwargs)
def _SettleBets(self, game, resolver, msg_fn, *args, **kwargs):
"""Internal version of SettleBets to be run with a transaction."""
with self._ledger_lock:
tx = kwargs.get('tx')
if not tx:
logging.error('_SettleBets can only be called with a transaction.')
return []
bets = self._GetBets(game.name, tx)
if not bets:
logging.warning('Tried to settle bets for %s, but no bets were found',
game.name)
return []
# Filter out bets with 'resolver' set and != the current bot
unresolved_bets = collections.defaultdict(list)
filtered_bets = collections.defaultdict(list)
for user_id, user_bets in bets.items():
for bet in user_bets:
if not bet.resolver or bet.resolver == resolver:
filtered_bets[user_id].append(bet)
else:
unresolved_bets[user_id].append(bet)
if not filtered_bets:
logging.info('No bets found for resolver %s', resolver)
return []
winner_info, unused_bets, notifications = game.SettleBets(
filtered_bets, msg_fn, *args, **kwargs)
# Merge bets that were filtered out of the pool with bets unused by the
# game itself. We can't use a raw update here since we need to merge the
# lists of bets for users with bets in both dicts.
for user_id, user_bets in unresolved_bets.items():
if user_id in unused_bets:
unused_bets[user_id] += user_bets
else:
unused_bets[user_id] = user_bets
self._SetBets(game.name, unused_bets, tx=tx)
for winner, winnings in winner_info:
if isinstance(winnings, numbers.Number):
if not self._bank.ProcessPayment(BOOKIE_ACCOUNT, winner, winnings,
'Gambling payout', msg_fn):
logging.error('Couldn\'t pay %s %s for winning %s', winner, winnings,
game.name)
else:
self._inventory.AddItem(winner, winnings)
return notifications
def _GetBets(self, row, tx=None):
json_bets = self._store.GetJsonValue(row, self._BET_SUBKEY, tx) or {}
bets = {
u: [json_format.ParseDict(b, bet_pb2.Bet()) for b in user_bets
] for u, user_bets in json_bets.items()
}
return collections.defaultdict(list, bets)
def _SetBets(self, row, bets, tx=None):
json_bets = {
u: [json_format.MessageToDict(b) for b in user_bets
] for u, user_bets in bets.items()
}
return self._store.SetJsonValue(row, self._BET_SUBKEY, json_bets, tx=tx)
# TODO: Allow holds on accounts to ensure coins will exist for a
# ProcessPayment in the near future.
class Bank(object):
"""Class for managing user balances of hypecoins in the HypeBank."""
_BALANCE_SUBKEY = 'bank:balance'
_TRANSACTION_SUBKEY = 'bank:transaction'
_MIN_OVERDRAFT_FEE = 5
_MAX_OVERDRAFT_FEE_PERCENT = 0.05
# Bank class also might want a way to determine if a user has a balance or not
def __init__(self, store, bot_name):
self._store = store
self._bot_name = bot_name
self._withdraw_lock = threading.RLock()
def GetBalance(self, user):
balance = self._store.GetValue(user.user_id, self._BALANCE_SUBKEY)
if not balance:
return 0
return util_lib.SafeCast(balance, int, 0)
def GetUserBalances(self, plebs_only=False):
"""Returns dict of user_ids mapping to their balance for all users."""
user_balances = self._store.GetSubkey(self._BALANCE_SUBKEY)
# pylint: disable=g-complex-comprehension
return {
user_id: util_lib.SafeCast(balance, int, 0)
for user_id, balance in user_balances
if (not plebs_only or user_id not in HYPECENTS) and
not user_id.startswith('http')
}
# pylint: enable=g-complex-comprehension
def GetTransactions(self, user):
json_entries = self._store.GetHistoricalValues(user.user_id,
self._TRANSACTION_SUBKEY, 5)
return [
json_format.ParseDict(entry, bank_pb2.LedgerEntry())
for entry in json_entries
]
def GetBankStats(self, plebs_only=False):
"""Returns the total number of accounts and the sum of all balances."""
user_balances = self.GetUserBalances(plebs_only=plebs_only)
balance_sum = sum(user_balances.values())
return len(user_balances), balance_sum
def MintNewHypeCoins(self):
"""Creates new HypeCoins if MINT_ACCOUNT is running low.
Specifically, if the MINT_ACCOUNT has less than 25% of the total HypeCoin
market size, this method will mint new coins scaling linearly with the
number of users, and logarithmically with the total market size.
"""
mint_balance = self.GetBalance(MINT_ACCOUNT)
num_users, coins_in_circulation = self.GetBankStats()
if mint_balance >= coins_in_circulation // 4:
logging.info(
'Mint balance (%s) >= 25%% of market (%s), not minting new coins',
util_lib.FormatHypecoins(mint_balance),
util_lib.FormatHypecoins(coins_in_circulation))
return
num_coins_to_mint = max(
5000, int(math.log(coins_in_circulation, 2) * num_users * 1000))
logging.info('Minting %s', util_lib.FormatHypecoins(num_coins_to_mint))
entry = bank_pb2.LedgerEntry(
counterparty={
'user_id': '_ether',
'display_name': 'Ether'
},
amount=num_coins_to_mint,
details='Minting')
entry.create_time.GetCurrentTime()
if not self._Deposit(MINT_ACCOUNT, num_coins_to_mint, entry, None):
logging.error('Minting %s failed',
util_lib.FormatHypecoins(num_coins_to_mint))
def ParseAmount(self, user, amount_str, msg_fn):
"""Read user's minds.
Convert a string into an amount of hypecoins.
Args:
user: {string} user name.
amount_str: {string} amount as string.
msg_fn: {callable(channel, msg)} function to send messages.
Returns:
{Optional[int]} Amount as int or None if it can't be parsed.
"""
# Parser handlers.
# Can return either an int value or a string. Strings will be replied to the
# user and replaced with a None value.
def _IntAmount(match, unused_balance):
return int(match.groups()[0])
def _HumanIntAmount(match, unused_balance):
try:
return int(util_lib.UnformatHypecoins(match.groups()[0]))
except ValueError:
return None
def _HexAmount(match, unused_balance):
return int(match.groups()[0], 16)
def _RandomBalance(unused_match, balance):
return random.randint(1, balance)
def _MemeTeam(unused_match, unused_balance):
# TODO: Determine a way to trigger commands at will.
# self.Meme(channel, None, None)
return 'ayyy'
# List of [regex, parser handler].
parsers = (
(r'%s$' % self._bot_name,
lambda x, y: 'You can\'t put a price on this bot.'),
(r'(dank)? ?memes?$', _MemeTeam),
(r'(-?[0-9]+)$', _IntAmount),
(r'(?:0x)([0-9,a-f]+)$', _HexAmount),
(r'(a )?positive int$', _RandomBalance),
(r'(-?[0-9.]+ ?[A-Za-z]+)$', _HumanIntAmount),
)
balance = self.GetBalance(user)
amount_str = amount_str.lower().strip()
if amount_str in messages.GAMBLE_STRINGS:
return balance
amount = None
for parser in parsers:
match = re.match(parser[0], amount_str)
if match:
amount = parser[1](match, balance)
break
if amount is None:
amount = 'Unrecognized amount.'
if isinstance(amount, six.string_types):
msg_fn(None, amount)
amount = None
return amount
def FineUser(self, user, amount, details, msg_fn):
return self.ProcessPayment(
user,
BOOKIE_ACCOUNT,
amount,
'Fine: %s' % details,
msg_fn,
can_overdraft=True)
def ProcessPayment(self,
customer,
merchants,
num_coins,
details,
msg_fn,
can_overdraft=False,
merchant_weights=None):
"""Process payment from customer to merchant.
The merchant will only be paid if the customer has the funds.
Args:
customer: {User} name of account to withdraw money.
merchants: {User or list<User>} name(s) of account(s) to deposit money.
num_coins: {int} number of hypecoins to transfer.
details: {string} details of transaction.
msg_fn: {callable(channel, msg)} function to send messages.
can_overdraft: {boolean} whether it is possible to overdraft the account.
If True, the account balance can go negative and no fees will be
charged. If False, the transaction will fail and an overdraft fee will
be assessed if there are insufficient funds for the transaction.
merchant_weights: {list<float>} Weight of num_coins that each merchant
will receive. Defaults to all 1's.
Returns:
{boolean} whether payment was successful.
"""
if num_coins < 0:
logging.error('ProcessPayment called with negative value: %s, %s -> %s',
num_coins, customer, merchants)
return False
if isinstance(merchants, user_pb2.User):
merchants = [merchants]
if merchant_weights is None:
merchant_weights = [1] * len(merchants)
total_weight = sum(merchant_weights)
merchant_weights = [w / total_weight for w in merchant_weights]
amount_paid = 0
success = True
for i, (merchant, weight) in enumerate(zip(merchants, merchant_weights)):
# Ensure we don't overpay due to rounding.
merchant_amount = min(
int(round(num_coins * weight)), num_coins - amount_paid)
# Give the last person the extra coin to compensate for them losing a coin
# sometimes.
if i == len(merchants) - 1:
merchant_amount = num_coins - amount_paid
if merchant_amount > 0:
withdrawl_entry = bank_pb2.LedgerEntry(
details=details, counterparty=merchant)
withdrawl_entry.create_time.GetCurrentTime()
deposit_entry = bank_pb2.LedgerEntry(
details=details,
counterparty=customer,
create_time=withdrawl_entry.create_time)
if (self._Withdraw(customer, merchant_amount, withdrawl_entry, msg_fn,
can_overdraft) and
self._Deposit(merchant, merchant_amount, deposit_entry, msg_fn)):
amount_paid += merchant_amount
else:
success = False
return success
def _Deposit(self, user: user_pb2.User, num_coins: int,
entry: bank_pb2.LedgerEntry, msg_fn) -> bool:
"""Adds num_coins to user's balance.
Args:
user: User of account into which to deposit.
num_coins: Number of hype coins to deposit.
entry: Details of transaction.
msg_fn: {callable(channel, msg)} function to send messages.
Returns:
Whether deposit was successful.
"""
if num_coins < 0:
logging.error('Deposit called with negative value: %s, %s', user,
num_coins)
return False
entry.amount = num_coins
tx_name = 'CREDIT %s %s' % (num_coins, user.user_id)
self._store.RunInTransaction(
self._BankTransaction, user, num_coins, entry, tx_name=tx_name)
if msg_fn:
msg_fn(
user, '%s deposited into your account. (%s)' %
(util_lib.FormatHypecoins(num_coins), entry.details))
# TODO: Maybe fix returns now that RunInTransaction can throw.
return True
def _Withdraw(self,
user: user_pb2.User,
num_coins: int,
entry: bank_pb2.LedgerEntry,
msg_fn,
can_overdraft: bool = False) -> bool:
"""Subtracts num_coins from user's balance.
Args:
user: User of account from which to withdraw.
num_coins: Number of hype coins to withdraw.
entry: Details of transaction.
msg_fn: {callable(channel, msg)} function to send messages.
can_overdraft: Whether it is possible to overdraft the account. If True,
the account balance can go negative and no fees will be charged. If
False, the transaction will fail and an overdraft fee will be assessed
if there are insufficient funds for the transaction.
Returns:
Whether withdrawal was successful.
"""
if num_coins < 0:
logging.error('Withdraw called with negative value: %s, %s', user,
num_coins)
return False
# TODO: This should really be a transaction.
with self._withdraw_lock:
balance = self.GetBalance(user)
if balance < num_coins and not can_overdraft:
logging.info('Overdraft: %s, %d > %d', user, num_coins, balance)
overdraft_fee = max(self._MIN_OVERDRAFT_FEE,
int(balance * self._MAX_OVERDRAFT_FEE_PERCENT))
self.ProcessPayment(
user,
FEE_ACCOUNT,
overdraft_fee,
'Overdraft fee',
msg_fn,
can_overdraft=True)
return False
entry.amount = -num_coins
tx_name = 'DEBIT %s %s' % (num_coins, user.user_id)
self._store.RunInTransaction(
self._BankTransaction, user, -num_coins, entry, tx_name=tx_name)
if msg_fn:
msg_fn(
user, '%s withdrawn from your account. (%s)' %
(util_lib.FormatHypecoins(num_coins), entry.details))
# TODO: Maybe fix returns now that RunInTransaction can throw.
return True
def _BankTransaction(self,
user: user_pb2.User,
delta: int,
entry: bank_pb2.LedgerEntry,
tx=None):
"""Executes a hypecoin balance update, storing details in a log."""
try:
self._store.UpdateValue(user.user_id, self._BALANCE_SUBKEY, delta, tx)
self._store.PrependValue(
user.user_id,
self._TRANSACTION_SUBKEY,
json_format.MessageToDict(entry),
max_length=20,
tx=tx)
except Exception as e:
logging.error('BankTransaction failed: %s', entry)
raise e
```
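The robbery failure chance in `Thievery.Rob` is the sigmoid above applied to the fraction of the victim's balance being stolen, offset by the thief and victim alert scores; a standalone sketch of that curve with alert values of zero, matching a first-time thief and victim:
```python
import math

def failure_chance(amount, victim_balance, thief_alert=0.0, victim_alert=0.0,
                   base_balance_percent=0.02, scale=200.0):
  # Mirrors Thievery._Sigmoid with the offset computed in Rob().
  offset = base_balance_percent * (1 - thief_alert - victim_alert)
  return 1 / (1 + math.exp(-scale * (amount / victim_balance - offset)))

# Sample fractions of the victim balance (made up); 2% is the 50/50 point.
for pct in (0.01, 0.02, 0.05, 0.10):
  print('steal %4.0f%% of balance -> %3.0f%% chance of being caught'
        % (pct * 100, failure_chance(pct * 1000, 1000) * 100))
```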
#### File: plugins/league/esports_lib.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import Counter
from collections import defaultdict
import copy
import os
import random
from threading import RLock
from absl import logging
import arrow
from hypebot.core import name_complete_lib
from hypebot.core import util_lib
from hypebot.data.league import messages
from hypebot.data.league import nicknames
from hypebot.protos import esports_pb2
from hypebot.protos.riot.v4 import constants_pb2
from hypebot.protos.riot.v4 import league_pb2
# pylint: disable=line-too-long
from google.protobuf import json_format
# pylint: enable=line-too-long
class Match(object):
"""Wraps protobuf match with business logic."""
def __init__(self, match):
self._match = match
self.time = arrow.get(self._match.timestamp)
# Matches in the past are assumed to have already been announced.
self.announced = arrow.utcnow() > self.time
def __repr__(self):
return self._match.__repr__()
def __str__(self):
return str(self._match)
def __getattr__(self, attr):
return getattr(self._match, attr)
class TournamentProvider(object):
"""Provides common interface to a professional tournament."""
def __init__(self, stats_enabled=False):
self.stats_enabled = stats_enabled
@property
def league_id(self):
"""Unique abbreviation for league."""
pass
@property
def name(self):
"""Human readable name of league."""
pass
@property
def aliases(self):
"""List of alternate names for the league."""
return []
@property
def teams(self):
"""List of teams participating in tournament."""
return []
@property
def brackets(self):
"""List of brackets in the tournament.
A tournament may be split into multiple brackets/rounds. E.g., a simple
tournament may consist of a regular season followed by playoffs. Or a
complex tournament can have play-in rounds, multiple pools, and a playoffs.
Returns:
List of brackets.
"""
return []
def _MakeBracketId(self, bracket_name):
"""Make bracket id such that it is globally unique."""
return '%s-%s' % (self.league_id, bracket_name)
def LoadData(self):
"""(Re)loads all data associated with the tournament."""
raise NotImplementedError('TournamentProviders must be able to LoadData.')
def UpdateMatches(self):
"""Poll for match updates.
Returns a list of matches which have changed since the last LoadData.
It may be smarter to allow users of the provider to register a callback to
get "push" notifications for match updates, but that would require more
changes to hypebot.
"""
raise NotImplementedError(
'TournamentProviders must be able to UpdateMatches.')
class RitoProvider(TournamentProvider):
"""Provides data for Rito tournaments.
Scrapes lolesports.com undocumented APIs.
"""
# This was pulled from dev-tools. Uncertain how long before it changes.
_API_KEY = '<KEY>'
_POSITIONS = {
'toplane': 'Top',
'jungle': 'Jungle',
'midlane': 'Mid',
'adcarry': 'ADC',
'support': 'Support',
}
def __init__(self, proxy, region, league_id, aliases=None, **kwargs):
super(RitoProvider, self).__init__(**kwargs)
self._proxy = proxy
self._region = region
self._league_id = league_id
self._aliases = aliases or []
self._lock = RLock()
self._teams = {}
self._brackets = {}
self._matches = {}
@property
def league_id(self):
return self._league_id
@property
def name(self):
return self._region
@property
def aliases(self):
return self._aliases
@property
def brackets(self):
with self._lock:
return self._brackets.values()
@property
def teams(self):
with self._lock:
return self._teams.values()
def _FetchEsportsData(self,
api_endpoint,
params=None,
force_lookup=True,
use_storage=False):
"""Gets eSports data from rito, bypassing the cache if force_lookup.
Args:
api_endpoint: Which endpoint to call. Known endpoints:
* getLeagues
* getTournamentsForLeague: leagueId
* getStandings: tournamentId
* getCompletedEvents: tournamentId
params: GET params to populate for endpoint.
force_lookup: Whether to force lookup or to allow caching.
use_storage: Whether to store result in data store for caching.
Returns:
Dictionary of JSON response from endpoint.
"""
base_esports_url = 'https://esports-api.lolesports.com/persisted/gw/'
full_url = os.path.join(base_esports_url, api_endpoint)
headers = {
'x-api-key': self._API_KEY,
}
request_params = {
'hl': 'en-US',
}
if params:
request_params.update(params)
data = self._proxy.FetchJson(
full_url,
params=request_params,
headers=headers,
force_lookup=force_lookup,
use_storage=use_storage)
if data:
return data['data']
return {}
def _LoadTeam(self, slug):
"""Loads information about a team from Rito."""
team_data = self._FetchEsportsData('getTeams', {'id': slug})
team_data = util_lib.Access(team_data, 'teams.0')
if not team_data:
logging.warning('Failed to load team: %s', slug)
return
team = esports_pb2.Team(
team_id=team_data['id'],
name=team_data['name'],
abbreviation=team_data['code'])
observed_positions = set()
for player_data in team_data['players']:
position = player_data['role'].title()
team.players.add(
summoner_name=player_data['summonerName'],
position=position,
is_substitute=position in observed_positions,
team_id=team.team_id)
observed_positions.add(position)
self._teams[team.team_id] = team
def _LoadStandings(self, tournament_id):
"""(Re)loads standings for a given tournament_id.
Note: If no bracket exists for the given `stage` of the tournament, this
will create one, otherwise it will simply clear the existing standings for
the bracket and update in place.
Args:
tournament_id: ID of tournament.
"""
standings_data = self._FetchEsportsData(
'getStandings', {'tournamentId': tournament_id})
if not standings_data or 'standings' not in standings_data:
logging.error('Failed to get standings.')
return
for stage in standings_data['standings'][0]['stages']:
for section in stage['sections']:
# The section name is more of a slug and not a pretty name. Lolesports
# uses JavaScript and a locale file to convert to human name, but we do
# the best we can given that it's non-trivial (and even more brittle) to
# try to parse the JS to figure out exactly what to display.
section_name = section['name'].replace('_', ' ').title()
full_name = '%s: %s' % (stage['name'], section_name)
bracket_id = self._MakeBracketId(full_name)
if bracket_id in self._brackets:
b = self._brackets[bracket_id]
del b.standings[:]
else:
b = esports_pb2.Bracket(
bracket_id=bracket_id,
name=full_name,
league_id=self._league_id,
is_playoffs=stage['slug'] == 'playoffs')
self._brackets[b.bracket_id] = b
rankings = util_lib.Access(section, 'rankings')
if not rankings:
continue
for group in rankings:
for team in group['teams']:
if team['id'] not in self._teams:
self._LoadTeam(team['slug'])
if team['id'] not in self._teams:
# New for 2020, we have TBD teams which don't exist.
continue
b.standings.add(
rank=group['ordinal'],
wins=team['record']['wins'],
losses=team['record']['losses'],
team=self._teams[team['id']])
def _LoadSchedule(self, bracket):
"""Loads schedule for given bracket.
This is a lie. It loads the schedule for the bracket's tournament and
pretends that all matches belong to this bracket, since Rito no longer
provides any indication of which bracket a match belongs to.
Args:
bracket: The bracket for which the schedule should be loaded.
Returns:
List of matches that were updated, i.e., completed, since the last time
that _LoadSchedule was called.
"""
updated_matches = []
schedule_data = self._FetchEsportsData(
'getSchedule', {'leagueId': bracket.league_id})
schedule_data = util_lib.Access(schedule_data, 'schedule.events')
if not schedule_data:
logging.warning('Failed to load schedule for %s', bracket.name)
return []
for event in schedule_data:
winner = 'TIE' if event['state'] == 'completed' else None
if util_lib.Access(event, 'match.teams.0.result.outcome') == 'win':
winner = self._FindTeamId(event['match']['teams'][0]['code'])
elif util_lib.Access(event, 'match.teams.1.result.outcome') == 'win':
winner = self._FindTeamId(event['match']['teams'][1]['code'])
match_id = event['match']['id']
if match_id in self._matches:
match = self._matches[match_id]
if winner and not match.winner:
match.winner = winner
updated_matches.append(match)
else:
match = bracket.schedule.add(
match_id=event['match']['id'],
bracket_id=bracket.bracket_id,
red=self._FindTeamId(event['match']['teams'][0]['code']),
blue=self._FindTeamId(event['match']['teams'][1]['code']),
timestamp=arrow.get(event['startTime']).timestamp,
winner=winner)
self._matches[match.match_id] = match
return updated_matches
def _FindTeamId(self, code):
for team in self._teams.values():
if team.abbreviation == code:
return team.team_id
return 'TBD'
def LoadData(self):
with self._lock:
self._teams = {}
self._matches = {}
self._brackets = {}
self._LoadData()
def _LoadData(self):
"""Helper method to load data."""
updated_matches = []
tournament_data = self._FetchEsportsData('getTournamentsForLeague',
{'leagueId': self._league_id})
if not tournament_data or 'leagues' not in tournament_data:
logging.error('Could not get tournaments for league: %s', self._region)
return []
for tournament in self._FindActiveTournaments(
# We requested a single league so we try to take the first.
util_lib.Access(tournament_data, 'leagues.0.tournaments', [])):
self._LoadStandings(tournament['id'])
updated_matches.extend(
self._LoadSchedule(list(self._brackets.values())[0]))
return updated_matches
def UpdateMatches(self):
with self._lock:
return self._LoadData()
def _FindActiveTournaments(self, tournaments):
"""From a list of highlanderTournaments, finds all active or most recent."""
active_tournaments = []
most_recent_tournament = None
newest_start_date = arrow.Arrow.min
t_now = arrow.utcnow()
for t in tournaments:
if 'startDate' not in t:
continue
t_start_date = arrow.get(t['startDate'])
t_end_date = arrow.get(t['endDate'])
if t_start_date > newest_start_date:
newest_start_date = t_start_date
most_recent_tournament = t
if t_start_date <= t_now <= t_end_date:
active_tournaments.append(t)
return active_tournaments or [most_recent_tournament]
class BattlefyProvider(TournamentProvider):
"""Uses unofficial Battlefy APIs to provide tournament data."""
_BASE_URL = 'https://api.battlefy.com'
def __init__(self, proxy, rito, league_id, alias, realm='NA1', **kwargs):
super(BattlefyProvider, self).__init__(**kwargs)
self._proxy = proxy
self._rito = rito
self._league_id = league_id
self._alias = alias
self._realm = realm
self._teams = {}
self._brackets = {}
self._matches = {}
self._lock = RLock()
@property
def league_id(self):
return self._league_id
@property
def name(self):
return self._alias
@property
def aliases(self):
return [self._alias]
@property
def brackets(self):
with self._lock:
return self._brackets.values()
@property
def teams(self):
with self._lock:
return self._teams.values()
def _PlayerRank(self, summoner_name):
"""Returns rank of player, e.g., D4."""
rank = '?'
summoner = self._rito.GetSummoner(self._realm, summoner_name)
if not summoner:
return rank
response = self._rito.ListLeaguePositions(self._realm, summoner.id)
if not response:
return rank
for league in response.positions:
if league.queue_type == constants_pb2.QueueType.RANKED_SOLO_5x5:
tier = constants_pb2.Tier.Enum.Name(league.tier)[0].upper()
division = {
'I': '1',
'II': '2',
'III': '3',
'IV': '4'
}[league_pb2.TierRank.Enum.Name(league.rank)]
rank = tier + division
break
return rank
def _LoadTeams(self):
"""Load teams."""
with self._lock:
teams = self._proxy.FetchJson(
'/'.join([self._BASE_URL, 'tournaments', self.league_id, 'teams']),
force_lookup=True)
for team in teams:
self._teams[team['_id']] = esports_pb2.Team(
team_id=team['_id'],
name=team['name'],
# Battlefy does not provide team abbreviations, so we make our best
# guess by using the first letter of each word. There may be
# collisions, and not all names produce desirable/informative
# abbreviations. E.g., Adobe #FF0000 -> A#. Poor abbreviations may
# require using the full team name for auto-complete.
abbreviation=''.join([word[0] for word in team['name'].split()
]).upper(),
league_id=team['tournamentID'])
for player in team['players']:
self._teams[team['_id']].players.add(
summoner_name=player['inGameName'],
team_id=team['_id'],
position=self._PlayerRank(player['inGameName']))
def _UpdateStandings(self, bracket):
stage_id = bracket.bracket_id.split('-')[-1]
standings = self._proxy.FetchJson(
'/'.join([self._BASE_URL, 'stages', stage_id,
'latest-round-standings']),
force_lookup=True)
del bracket.standings[:]
for rank, standing in enumerate(standings):
bracket.standings.add(
team=self._teams[standing['teamID']],
rank=rank + 1,
wins=standing['wins'],
losses=standing['losses'],
ties=standing['ties'],
points=standing['points'])
def _MatchWinner(self, match):
winner = 'TIE' if match.get('isComplete') else None
if util_lib.Access(match, 'top.winner'):
winner = match['top']['teamID']
elif util_lib.Access(match, 'bottom.winner'):
winner = match['bottom']['teamID']
return winner
def _LoadStage(self, stage_id):
"""Loads a single stage (bracket)."""
with self._lock:
stage = self._proxy.FetchJson(
'/'.join([self._BASE_URL, 'stages', stage_id]), force_lookup=True)
bracket = esports_pb2.Bracket(
bracket_id=self._MakeBracketId(stage_id),
name=stage['name'],
league_id=self.league_id,
is_playoffs='playoff' in stage['name'].lower())
self._brackets[bracket.bracket_id] = bracket
matches = self._proxy.FetchJson(
'/'.join([self._BASE_URL, 'stages', stage_id, 'matches']),
force_lookup=True)
# Battlefy doesn't provide actual match start times. We assume that
# matches are only provided for the current week, and then replace the
# default with the completed time if it exists.
default_match_time = util_lib.ArrowTime(
weekday=5, hour=12, tz='America/Los_Angeles')
for match in matches:
match_time = default_match_time
if 'completedAt' in match:
match_time = arrow.get(match['completedAt'])
m = bracket.schedule.add(
match_id=match['_id'],
bracket_id=bracket.bracket_id,
red=util_lib.Access(match, 'top.teamID', 'BYE'),
blue=util_lib.Access(match, 'bottom.teamID', 'BYE'),
timestamp=match_time.timestamp,
winner=self._MatchWinner(match))
self._matches[m.match_id] = m
stats = None
if self.stats_enabled and m.winner:
stats = self._proxy.FetchJson(
'/'.join([self._BASE_URL, 'matches', m.match_id]),
params={'extend[stats]': 'true'},
force_lookup=True)
for stat_idx, game_id in enumerate(match.get('appliedRiotGameIDs', [])):
game = m.games.add(
game_id=game_id, realm=self._realm, hash=match['lolHookUrl'])
game_stats = util_lib.Access(stats, '0.stats.%d.stats' % stat_idx)
if game_stats:
self._ParseGameStats(game, game_stats)
self._UpdateStandings(bracket)
def _ParseGameStats(self, game, stats):
"""Maps from Battlefy stats to rito Match proto."""
game.stats.game_id = stats['gameId']
game.stats.game_duration = stats['gameLength']
game.stats.game_mode = stats['gameMode']
game.stats.game_type = stats['gameType']
game.stats.game_version = stats['gameVersion']
game.stats.map_id = stats['mapId']
game.stats.platform_id = stats['platformId']
for team in stats['teamStats']:
team_stats = game.stats.teams.add()
json_format.ParseDict(team, team_stats)
for team in stats['teams']:
for player in team['players']:
participant = game.stats.participants.add()
json_format.ParseDict(player, participant, ignore_unknown_fields=True)
identity = game.stats.participant_identities.add()
identity.participant_id = player['participantId']
identity.player.summoner_name = player['summonerName']
def _UpdateSchedule(self, bracket):
"""Updates a single brackets schedule."""
updated_matches = []
stage_id = bracket.bracket_id.split('-')[-1]
matches = self._proxy.FetchJson(
'/'.join([self._BASE_URL, 'stages', stage_id, 'matches']),
force_lookup=True)
for match in matches:
m = self._matches.get(match['_id'])
if not m or m.winner:
continue
winner = self._MatchWinner(match)
if winner:
m.winner = winner
updated_matches.append(m)
return updated_matches
def LoadData(self):
with self._lock:
self._teams = {}
self._brackets = {}
self._matches = {}
# Load teams first since we refer to these when loading stage standings.
self._LoadTeams()
response = self._proxy.FetchJson(
'/'.join([self._BASE_URL, 'tournaments', self.league_id]),
force_lookup=True)
for stage_id in response['stageIDs']:
self._LoadStage(stage_id)
def UpdateMatches(self):
updated_matches = []
with self._lock:
for bracket in self._brackets.values():
updated_matches.extend(self._UpdateSchedule(bracket))
self._UpdateStandings(bracket)
return updated_matches
class GrumbleProvider(TournamentProvider):
"""Provide tournament information for a Grumble division."""
_BASE_URL = ('http://goog-lol-tournaments.appspot.com/rest/')
def __init__(self, proxy, division='D1', realm='NA1', year=2019, **kwargs):
super(GrumbleProvider, self).__init__(**kwargs)
self._proxy = proxy
self._division = division
self._realm = realm
self._year = year
self._teams = {}
self._brackets = {}
self._matches = {}
self._lock = RLock()
@property
def league_id(self):
return 'grumble-%s' % self._division
@property
def name(self):
return 'Draft' if self._division == 'D1' else 'Open'
@property
def aliases(self):
return [self._division]
@property
def brackets(self):
with self._lock:
return self._brackets.values()
@property
def teams(self):
with self._lock:
return self._teams.values()
def _FetchJson(self, end_point, path_parts, use_year=True, **kwargs):
parts = [self._BASE_URL, end_point]
if use_year:
parts.append('grumble-%s' % self._year)
parts.extend(path_parts)
return self._proxy.FetchJson('/'.join(parts), **kwargs)
def _ParseSchedule(self, schedule, bracket):
"""Parse schedule into bracket."""
match_count = 0
standings = {}
with self._lock:
for week in schedule:
for match in week['matches']:
match_count += 1
m = bracket.schedule.add(
match_id='%s-%s-%s' %
(self.league_id, bracket.bracket_id, match_count),
bracket_id=bracket.bracket_id,
blue=util_lib.Access(match, 'team1.ref.id', 'TBD'),
red=util_lib.Access(match, 'team2.ref.id', 'TBD'),
timestamp=match['timestampSec'])
self._matches[m.match_id] = m
for game in match['games']:
game_proto = m.games.add(
game_id=str(util_lib.Access(game, 'ref.gameId')),
realm=self._realm,
hash=util_lib.Access(game, 'ref.tournamentCode'))
if self.stats_enabled and util_lib.Access(game, 'winner'):
response = self._FetchJson(
'game', [game_proto.game_id, game_proto.hash],
use_year=False,
use_storage=True)
if response:
json_format.ParseDict(
response, game_proto.stats, ignore_unknown_fields=True)
for team in [match['team1'], match['team2']]:
team_id = util_lib.Access(team, 'ref.id')
if not team_id:
continue
if team_id not in self._teams:
self._teams[team_id] = esports_pb2.Team(
team_id=team_id,
abbreviation=team_id,
name=team['ref']['displayName'],
league_id=self.league_id)
if team_id not in standings:
standings[team_id] = esports_pb2.TeamStanding(
team=self._teams[team_id])
if not team['outcome']:
continue
if team['outcome'] == 'VICTORY':
m.winner = team_id
standings[team_id].wins += 1
standings[team_id].points += 3
elif team['outcome'] == 'TIE':
m.winner = 'TIE'
standings[team_id].ties += 1
standings[team_id].points += 1
else:
standings[team_id].losses += 1
standings = sorted(
standings.values(), key=lambda x: x.points, reverse=True)
rank = 1
cur_points = -1
for i, team in enumerate(standings):
if team.points != cur_points:
rank = i + 1
cur_points = team.points
team.rank = rank
bracket.standings.extend(standings)
def LoadData(self):
"""Scrape goog-lol-tournament REST API for tournament data."""
with self._lock:
self._teams = {}
self._brackets = {}
self._matches = {}
self._brackets['practice'] = esports_pb2.Bracket(
bracket_id=self._MakeBracketId('practice'),
name='Practice',
league_id=self.league_id)
response = self._FetchJson(
'bracket', [self._division, 'practice'], force_lookup=True)
self._ParseSchedule(response['schedule'], self._brackets['practice'])
self._brackets['season'] = esports_pb2.Bracket(
bracket_id=self._MakeBracketId('season'),
name='Regular Season',
league_id=self.league_id)
response = self._FetchJson(
'bracket', [self._division, 'season'], force_lookup=True)
self._ParseSchedule(response['schedule'], self._brackets['season'])
self._brackets['playoffs'] = esports_pb2.Bracket(
bracket_id=self._MakeBracketId('playoffs'),
name='Playoffs',
is_playoffs=True,
league_id=self.league_id)
response = self._FetchJson(
'bracket', [self._division, 'playoffs'], force_lookup=True)
self._ParseSchedule(response['schedule'], self._brackets['playoffs'])
for team_id, team in self._teams.items():
response = self._FetchJson('team', [self._division, team_id])
if not response:
continue
for player in response['players']:
team.players.add(
summoner_name=player['summonerName'],
team_id=team_id,
position=random.choice(['Fill', 'Feed']))
try:
# Update each bracket's standings with the updated team data, since protos are copied by value.
for bracket in self._brackets.values():
for team_standings in bracket.standings:
if team_standings.team.team_id == team_id:
team_standings.team.CopyFrom(team)
except Exception as e:
logging.warning('Woops: %s', e)
def _UpdateSchedule(self, schedule, bracket):
"""Update existing matches if they are now wonnered."""
updated_matches = []
match_count = 0
with self._lock:
for week in schedule:
for match in week['matches']:
match_count += 1
match_id = '%s-%s-%s' % (self.league_id, bracket.bracket_id,
match_count)
old_match = self._matches.get(match_id)
if not old_match or old_match.winner:
continue
for team in [match['team1'], match['team2']]:
team_id = util_lib.Access(team, 'ref.id')
if not team_id or not team['outcome']:
continue
if team['outcome'] == 'VICTORY':
old_match.winner = team_id
elif team['outcome'] == 'TIE':
old_match.winner = 'TIE'
if old_match.winner:
updated_matches.append(old_match)
return updated_matches
def UpdateMatches(self):
updated_matches = []
with self._lock:
response = self._FetchJson(
'bracket', [self._division, 'practice'], force_lookup=True)
updated_matches.extend(
self._UpdateSchedule(response['schedule'],
self._brackets['practice']))
response = self._FetchJson(
'bracket', [self._division, 'season'], force_lookup=True)
updated_matches.extend(
self._UpdateSchedule(response['schedule'], self._brackets['season']))
response = self._FetchJson(
'bracket', [self._division, 'playoffs'], force_lookup=True)
updated_matches.extend(
self._UpdateSchedule(response['schedule'],
self._brackets['playoffs']))
return updated_matches
class EsportsLib(object):
"""Electronic Sports Library."""
def __init__(self, proxy, executor, game_lib, rito_tz, rito_lib):
self._proxy = proxy
self._executor = executor
self._game = game_lib
self._timezone = rito_tz
self._rito = rito_lib
self._providers = [
RitoProvider(
self._proxy,
'LCS',
'98767991299243165',
aliases=['NA', 'North America'],
stats_enabled=False),
RitoProvider(
self._proxy,
'LEC',
'98767991302996019',
aliases=['EU', 'Europe'],
stats_enabled=False),
RitoProvider(
self._proxy,
'LCK',
'98767991310872058',
aliases=['KR', 'Korea'],
stats_enabled=False),
RitoProvider(
self._proxy,
'Worlds',
'98767975604431411',
aliases=['IN'],
stats_enabled=False),
]
self._lock = RLock()
self._teams = {}
self._schedule = []
self._matches = {}
self._brackets = {}
self._leagues = {}
self._summoner_data = {}
# Maintains mappings from champ_key -> per-region stats about picks, etc.
self._champ_stats = defaultdict(lambda: defaultdict(Counter))
# Maintains mappings from player_key -> various stats about the player like
# per-champion wins/picks/etc.
self._player_stats = defaultdict(lambda: defaultdict(Counter))
# Maintains total number of games played per region
self._num_games = Counter()
self._callbacks = []
# Load eSports data in the background on startup so we don't have to wait
# for years while we fetch the bio of every player in multiple languages
# once for every API call we make regardless of endpoint.
self._load_status = self._executor.submit(self.LoadEsports)
@property
def teams(self):
"""Dictionary of [team id, name] => team."""
with self._lock:
return self._teams
@property
def schedule(self):
"""Time ordered list of matches."""
with self._lock:
return self._schedule
@property
def matches(self):
"""Dictionary of match id => match."""
with self._lock:
return self._matches
@property
def brackets(self):
"""Dictionary of bracket id => bracket."""
with self._lock:
return self._brackets
@property
def leagues(self):
"""Dictionary of [league id, alias, region] => bracket."""
with self._lock:
return self._leagues
def RegisterCallback(self, fn):
"""Register a function to be called whenever the esports data is updated."""
self._callbacks.append(fn)
def IsReady(self):
"""Returns if all the dependant data for EsportsLib has been loaded."""
return self._load_status.done()
def ReloadData(self):
self._proxy.FlushCache()
self.LoadEsports()
def LoadEsports(self):
"""Loads "static" data about each league."""
# Reloading providers and aggregating data is slow. So we use temporary
# variables and operate outside of the lock to allow other esports commands
# to resolve.
# TODO: Reload each provider in its own threadpool.
try:
for provider in self._providers:
provider.LoadData()
except Exception as e:
logging.error('Failed to load esports')
logging.exception(e)
teams = {}
matches = {}
brackets = {}
leagues = {}
summoner_data = {}
champ_stats = defaultdict(lambda: defaultdict(Counter))
player_stats = defaultdict(lambda: defaultdict(Counter))
num_games = Counter()
for league in self._providers:
leagues[league.league_id] = league
for bracket in league.brackets:
brackets[bracket.bracket_id] = bracket
for team_standings in bracket.standings:
for player in team_standings.team.players:
summoner_data[util_lib.CanonicalizeName(player.summoner_name)] = (
team_standings)
for match in bracket.schedule:
match = Match(match)
matches[match.match_id] = match
if match.winner and league.stats_enabled:
self._ScrapePickBanData(league, match, champ_stats, player_stats,
num_games)
for team in league.teams:
teams[team.team_id] = team
with self._lock:
team_aliases = {team.name: team_id for team_id, team in teams.items()}
team_aliases.update(
{team.abbreviation: team_id for team_id, team in teams.items()})
self._teams = name_complete_lib.NameComplete(team_aliases, teams)
self._schedule = sorted(matches.values(), key=lambda x: x.timestamp)
self._matches = matches
self._brackets = brackets
league_aliases = {
league.name: league.league_id for league in leagues.values()
}
for league in leagues.values():
for alias in league.aliases:
league_aliases[alias] = league.league_id
self._leagues = name_complete_lib.NameComplete(league_aliases, leagues)
self._summoner_data = summoner_data
self._champ_stats = champ_stats
self._player_stats = name_complete_lib.NameComplete(
nicknames.LCS_PLAYER_NICKNAME_MAP, player_stats,
[x['name'] for x in player_stats.values()])
self._num_games = num_games
logging.info('Loading esports complete, running callbacks.')
for fn in self._callbacks:
fn()
logging.info('Esports callbacks complete.')
def UpdateEsportsMatches(self):
"""Determines if any matches have been wonnered and returns them."""
updated_matches = []
for provider in self._providers:
updated_matches.extend(provider.UpdateMatches())
return updated_matches
def Who(self, summoner):
"""Gets the TeamStandings for the summoner to display in !who."""
summoner = util_lib.CanonicalizeName(summoner['summoner'])
with self._lock:
return self._summoner_data.get(summoner)
def GetLivestreamLinks(self):
"""Get links to the livestream(s), if any are currently active.
Returns:
Dict of match_id to link for livestreams.
"""
# TODO: Determine how to handle livestream links. Rito provides a
# single call to fetch all stream links regardless of the tournament, and
# grumble does not currently provide links.
return {}
def GetSchedule(self, subcommand, include_playoffs, num_games=5):
"""Get the schedule for the specified region or team."""
qualifier = 'All'
display_qualifier = 'All'
with self._lock:
if subcommand in self.teams:
qualifier = self.teams[subcommand].team_id
display_qualifier = self.teams[subcommand].name
if subcommand in self.leagues:
qualifier = self.leagues[subcommand].league_id
display_qualifier = self.leagues[subcommand].name
now = arrow.utcnow()
schedule = []
livestream_links = self.GetLivestreamLinks()
for match in self.schedule:
if self._MatchIsInteresting(match, qualifier, now, include_playoffs):
# If the game is in the future, add a livestream link if one exists. If
# the game is considered live, add either an existing livestream link or
# the fallback link.
if match.time > now:
if match.time == arrow.Arrow.max:
# This means rito hasn't scheduled this match yet
date_time = 'TBD'
else:
local_time = match.time.to(self._timezone)
date_time = local_time.strftime('%a %m/%d %I:%M%p %Z')
if match.match_id in livestream_links:
date_time += ' - %s' % livestream_links[match.match_id]
else:
date_time = 'LIVE'
if match.match_id in livestream_links:
date_time += ' - %s' % livestream_links[match.match_id]
num_games_str = ''
if match.games:
num_games_str = 'Bo%s - ' % len(match.games)
blue_team = self.MatchTeamName(match.blue)
blue_team = util_lib.Colorize('{:3}'.format(blue_team), 'blue')
red_team = self.MatchTeamName(match.red)
red_team = util_lib.Colorize('{:3}'.format(red_team), 'red')
schedule.append('{} v {}: {}{}'.format(blue_team, red_team,
num_games_str, date_time))
if len(schedule) >= num_games:
break
if not schedule:
schedule = [messages.SCHEDULE_NO_GAMES_STRING]
qualifier = 'No'
display_qualifier = 'No'
return schedule[:num_games], display_qualifier
def MatchTeamName(self, team_id):
"""Extract the team name (abbreviation) from their "id".
For matches, sometimes we don't store an actual team_id in the red/blue slot
since their could be a bye. The same goes for the winner if it is a tie. In
these cases, we want the display name to simply be whatever string we stored
in the team_id field.
Args:
team_id: Unique identifier of the team. E.g., from match.blue, match.red,
or match.winner.
Returns:
A short, human readable name for the team.
"""
return (self.teams[team_id].abbreviation
if team_id in self.teams else team_id)
def GetResults(self, subcommand, num_games=5):
"""Get the results of past games for the specified region or team."""
qualifier = 'All'
display_qualifier = 'All'
is_team = False
with self._lock:
if subcommand in self.teams:
qualifier = self.teams[subcommand].team_id
display_qualifier = self.teams[subcommand].name
is_team = True
if subcommand in self.leagues:
qualifier = self.leagues[subcommand].league_id
display_qualifier = self.leagues[subcommand].name
is_team = False
results = []
# Shallow copy so we don't actually reverse the schedule
tmp_schedule = self.schedule[:]
tmp_schedule.reverse()
for match in tmp_schedule:
if self._ResultIsInteresting(match, qualifier):
blue_team = self.MatchTeamName(match.blue)
blue_team = util_lib.Colorize('{:3}'.format(blue_team), 'blue')
red_team = self.MatchTeamName(match.red)
red_team = util_lib.Colorize('{:3}'.format(red_team), 'red')
is_tie = match.winner == 'TIE'
if is_team:
winner_msg = 'Tie' if is_tie else (
'Won!' if match.winner == qualifier else 'Lost')
else:
winner_msg = '{} {}'.format(
match.winner if is_tie else util_lib.Colorize(
'{:3}'.format(self.teams[match.winner].abbreviation),
'red' if match.red == match.winner else 'blue'),
':^)' if is_tie else 'wins!')
results.append('{} v {}: {}'.format(blue_team, red_team, winner_msg))
if len(results) >= num_games:
break
return results[:num_games], display_qualifier
def GetStandings(self, req_region, req_bracket):
"""Gets the standings for a specified region and bracket.
Args:
req_region: Search term for region. May be an alias, full name, or
anything that can uniquely identify the region.
req_bracket: Search term for bracket. Most regions have multiple brackets,
e.g., season and playoffs. International tournaments with groups tend to
have many brackets. This is a simplistic search which looks for the
characters anywhere in the bracket name.
Returns:
A list of standings for each matching bracket. Each standings is a dict
containing the `league`, `bracket`, and a sorted list of `teams`.
"""
league = self.leagues[req_region]
if not league:
return []
standings = []
for bracket in league.brackets:
if not (req_bracket.upper() in bracket.name.upper() or
req_bracket in bracket.bracket_id):
continue
standings.append({
'league': league,
'bracket': bracket,
'teams': sorted(bracket.standings, key=lambda x: x.rank),
})
return standings
def GetChampPickBanRate(self, region, champ):
"""Returns pick/ban data for champ, optionally filtered by region."""
champ_id = self._game.GetChampId(champ)
with self._lock:
if region in self.leagues:
region = self.leagues[region].league_id
if not champ_id:
logging.info('%s doesn\'t map to any known champion', champ)
return None, None
canonical_name = self._game.GetChampDisplayName(champ)
pick_ban_data = self._champ_stats[champ_id]
summed_pick_ban_data = Counter()
for region_data in [
v for k, v in pick_ban_data.items() if region in ('all', k)
]:
summed_pick_ban_data.update(region_data)
summed_pick_ban_data['num_games'] = sum(
v for k, v in self._num_games.items() if region in ('all', k))
return canonical_name, summed_pick_ban_data
def GetPlayerChampStats(self, query):
"""Returns champ statistics for LCS player."""
player_info = self._player_stats[query]
if not player_info:
logging.info('%s is ambiguous or doesn\'t map to any known player', query)
return None, None
player_data = {}
player_data['champs'] = copy.deepcopy(player_info)
player_data['num_games'] = copy.deepcopy(player_info['num_games'])
# Remove non-champ keys from the champ data. This is also why we need the
# deepcopies above.
del player_data['champs']['name']
del player_data['champs']['num_games']
return player_info['name'], player_data
def GetTopPickBanChamps(self, region, sort_key_fn, descending=True):
"""Returns the top 5 champions sorted by sort_key_fn for region."""
with self._lock:
if region in self.leagues:
region = self.leagues[region].league_id
champs_to_flattened_data = {}
for champ_id, region_data in self._champ_stats.items():
champ_name = self._game.GetChampNameFromId(champ_id)
if not champ_name:
logging.error('Couldn\'t parse %s into a champ_name', champ_id)
continue
champs_to_flattened_data[champ_name] = Counter()
for r, pb_data in region_data.items():
if region in ('all', r):
champs_to_flattened_data[champ_name].update(pb_data)
num_games = sum(
v for k, v in self._num_games.items() if region in ('all', k))
# First filter out champs who were picked in fewer than 5% of games
filtered_champs = [
x for x in champs_to_flattened_data.items()
if x[1]['picks'] >= (num_games / 20.0)
]
# We sort by the secondary key (picks) first
sorted_champs = sorted(
filtered_champs, key=lambda x: x[1]['picks'], reverse=True)
sorted_champs.sort(key=lambda x: sort_key_fn(x[1]), reverse=descending)
logging.info('TopPickBanChamps in %s [key=%s, desc? %s] => (%s, %s)',
region, sort_key_fn, descending, num_games, sorted_champs[:5])
return num_games, sorted_champs[:5]
def GetUniqueChampCount(self, region):
"""Calculates how many unique champs have been picked/banned."""
with self._lock:
if region in self.leagues:
region = self.leagues[region].league_id
unique = set([
self._game.GetChampNameFromId(k)
for k, c in self._champ_stats.items()
if region == 'all' or region in c.keys()
])
num_games = sum(
v for k, v in self._num_games.items() if region in ('all', k))
return len(unique), num_games
def _MatchIsInteresting(self, match, qualifier, now, include_playoffs):
"""Small helper method to check if a match is interesting right now."""
bracket = self.brackets[match.bracket_id]
region_and_teams = (bracket.league_id, match.blue, match.red)
return ((qualifier == 'All' or qualifier in region_and_teams) and
not match.winner and
(match.time == arrow.Arrow.max or
# Shift match time 1 hr/game into the future to show live matches.
match.time.shift(hours=len(match.games)) > now) and
(include_playoffs or not bracket.is_playoffs))
def _ResultIsInteresting(self, match, qualifier):
region_and_teams = (self.brackets[match.bracket_id].league_id, match.blue,
match.red)
if ((qualifier == 'All' or qualifier in region_and_teams) and match.winner):
return True
return False
def _ScrapePickBanData(self, league, match, champ_stats, player_stats,
num_games):
"""For each game in match, fetches and tallies pick/ban data from Riot."""
for game in match.games:
# Sometimes the provider was nice and already gave us the game stats.
if not game.HasField('stats'):
if not game.hash:
logging.info(
'Game hash missing. Probably a Bo# series that ended early.')
continue
game_stats = self._proxy.FetchJson(
'https://acs.leagueoflegends.com/v1/stats/game/%s/%s?gameHash=%s' %
(game.realm, game.game_id, game.hash),
use_storage=True)
if not game_stats:
logging.warning('Failed to fetch game stats for game: %s',
game.game_id)
continue
json_format.ParseDict(
game_stats, game.stats, ignore_unknown_fields=True)
num_games[league.league_id] += 1
participant_to_player = {
p.participant_id: p.player.summoner_name
for p in game.stats.participant_identities
}
# Collect ban data.
winning_team = None
for team in game.stats.teams:
if team.win == 'Win':
winning_team = team.team_id
for ban in team.bans:
champ_stats[ban.champion_id][league.league_id]['bans'] += 1
# Collect pick and W/L data
for player in game.stats.participants:
champ_id = player.champion_id
champ_name = self._game.GetChampNameFromId(champ_id)
# We need to use separate player_name and player_key here because Rito
# doesn't like to keep things like capitalization consistent with player
# names so they aren't useful as keys, but we still want to display the
# "canonical" player name back to the user eventually, so we save it as
# a value in player_stats instead.
player_name = participant_to_player[player.participant_id]
player_key = util_lib.CanonicalizeName(player_name or 'hypebot')
player_stats[player_key]['name'] = player_name or 'HypeBot'
if player.team_id == winning_team:
champ_stats[champ_id][league.league_id]['wins'] += 1
player_stats[player_key][champ_name]['wins'] += 1
player_stats[player_key]['num_games']['wins'] += 1
champ_stats[champ_id][league.league_id]['picks'] += 1
player_stats[player_key][champ_name]['picks'] += 1
player_stats[player_key]['num_games']['picks'] += 1
```
#### File: plugins/league/items_lib.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
from hypebot.core import name_complete_lib
from typing import Text
class ItemsLib(object):
"""Class for fetching item data from Riot API."""
_ITEM_ALIAS_MAP = {
'bc': 'theblackcleaver',
'blackcleaver': 'theblackcleaver',
'bloothirster': 'essencereaver',
'bootsoflucidity': 'ionianbootsoflucidity',
'bork': 'bladeoftheruinedking',
'botrk': 'bladeoftheruinedking',
'bt': 'thebloodthirster',
'cdrboots': 'ionianbootsoflucidity',
'dcap': 'rabadonsdeathcap',
'fh': 'frozenheart',
'fotm': 'faceofthemountain',
'frozenfist': 'iceborngauntlet',
'ibg': 'iceborngauntlet',
'ie': 'infinityedge',
'lucidityboots': 'ionianbootsoflucidity',
'lw': 'lastwhisper',
'mogs': 'warmogsarmor',
'pd': 'phantomdancer',
'qss': 'quicksilversash',
'rabadabadoodle': 'rabadonsdeathcap',
'runicechoes': 'enchantmentrunicechoes',
'sv': 'spiritvisage',
'swifties': 'bootsofswiftness',
'triforce': 'trinityforce',
}
def __init__(self, rito):
self._rito = rito
self._name_to_item = {}
self.ReloadData()
def ReloadData(self):
"""Reload LoL items-related data into memory from the Rito API."""
r = self._rito.ListItems()
if not r:
return
item_data = r.data
for item in item_data.values():
name = self._GetItemName(item.name)
self._name_to_item[name] = item
self._name_complete = name_complete_lib.NameComplete(
self._ITEM_ALIAS_MAP,
self._name_to_item, (i.name for i in item_data.values()),
dankify=True)
def _GetItemName(self, item_name):
"""Gets Item name without non-alphanumeric chars and all lowercase."""
return ''.join(list(filter(str.isalnum, str(item_name)))).lower()
def GetItemDescription(self, item_name):
"""Returns Item Description."""
# First Get Item
item = self._name_complete.GuessThing(item_name)
# Then Get Item Description
if item:
line = '{} ({} gold):'.format(item.name, item.gold.total)
response = self._CleanItemWrap(self._Sanitize(item.description))
response[0] = line + ' ' + response[0]
return response
else:
return ['Item "{}" not found.'.format(item_name)]
@staticmethod
def _CleanItemWrap(description):
"""Cleanly separates item descriptions."""
result = []
index = 0
slice_state = 0
last_slice = 0
# Separates each Active/Passive/Aura
while index < len(description):
if slice_state == 0 and (description[index:].startswith('UNIQUE ') or
description[index:].startswith('Active ') or
description[index:].startswith('Passive ')):
result.append(description[last_slice:index - 1])
slice_state = 1
last_slice = index
elif slice_state == 1 and description[index] == ':':
slice_state = 0
index += 1
description = description[last_slice:]
# Removes all the hints at the end. Example:
# (Unique Passives with the same name don't stack.)
while description[-1] == ')' and description.rfind('(') != -1:
description = description[:description.rfind('(')].strip()
result.append(description)
return result
def _Sanitize(self, raw: Text) -> Text:
return re.sub(r'<.*?>', '', raw)
```
#### File: hypebot/plugins/population_lib.py
```python
from typing import Optional, Text
from absl import logging
import arrow
from hypebot.core import schedule_lib
from hypebot.core import util_lib
from hypebot.proxies import proxy_lib
_DATA_URL = 'https://api.worldbank.org/v2/country/all/indicator/SP.POP.TOTL'
class PopulationLib():
"""Class that serves up populations for various geographical regions."""
# There are APIs for this, but for now hard-coded values are ok.
# Data accurate as of 2020/03/22
# Source:
# https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/national/totals/nst-est2019-alldata.csv
_US_STATE_NAMES = {
'AL': 'Alabama',
'AK': 'Alaska',
'AZ': 'Arizona',
'AR': 'Arkansas',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DE': 'Delaware',
'DC': 'District of Columbia',
'FL': 'Florida',
'GA': 'Georgia',
'HI': 'Hawaii',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'IA': 'Iowa',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'ME': 'Maine',
'MD': 'Maryland',
'MA': 'Massachusetts',
'MI': 'Michigan',
'MN': 'Minnesota',
'MS': 'Mississippi',
'MO': 'Missouri',
'MT': 'Montana',
'NE': 'Nebraska',
'NV': 'Nevada',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NY': 'New York',
'NC': 'North Carolina',
'ND': 'North Dakota',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VT': 'Vermont',
'VA': 'Virginia',
'WA': 'Washington',
'WV': 'West Virginia',
'WI': 'Wisconsin',
'WY': 'Wyoming',
'PR': 'Puerto Rico'
}
_US_STATE_POPULATIONS = {
'AL': 4903185,
'AK': 731545,
'AZ': 7278717,
'AR': 3017804,
'CA': 39512223,
'CO': 5758736,
'CT': 3565287,
'DE': 973764,
'DC': 705749,
'FL': 21477737,
'GA': 10617423,
'HI': 1415872,
'ID': 1787065,
'IL': 12671821,
'IN': 6732219,
'IA': 3155070,
'KS': 2913314,
'KY': 4467673,
'LA': 4648794,
'ME': 1344212,
'MD': 6045680,
'MA': 6892503,
'MI': 9986857,
'MN': 5639632,
'MS': 2976149,
'MO': 6137428,
'MT': 1068778,
'NE': 1934408,
'NV': 3080156,
'NH': 1359711,
'NJ': 8882190,
'NM': 2096829,
'NY': 19453561,
'NC': 10488084,
'ND': 762062,
'OH': 11689100,
'OK': 3956971,
'OR': 4217737,
'PA': 12801989,
'RI': 1059361,
'SC': 5148714,
'SD': 884659,
'TN': 6829174,
'TX': 28995881,
'UT': 3205958,
'VT': 623989,
'VA': 8535519,
'WA': 7614893,
'WV': 1792147,
'WI': 5822434,
'WY': 578759,
'PR': 3193694
}
def __init__(self, proxy: proxy_lib.Proxy):
self._proxy = proxy
self._scheduler = schedule_lib.HypeScheduler()
self._ids_to_names = self._US_STATE_NAMES.copy()
self._populations = self._US_STATE_POPULATIONS.copy()
# 5am is an arbitrary time, can be changed without any semantic effect.
self._scheduler.DailyCallback(
util_lib.ArrowTime(5), self._UpdatePopulations)
self._UpdatePopulations()
def GetPopulation(self, raw_region: Text) -> int:
"""Gets the total population for raw_region, or 0 for unknown regions."""
region = self._NormalizeId(raw_region)
return self._populations.get(region, 0)
def GetNameForRegion(self, raw_region: Text) -> Optional[Text]:
"""Takes user input and tries to convert it to a region."""
region = self._NormalizeId(raw_region)
return self._ids_to_names.get(region)
def IsUSState(self, raw_region: Text) -> bool:
"""Returns if the region passed is a US state or not."""
region = self._NormalizeId(raw_region)
return region in self._US_STATE_NAMES
def _UpdatePopulations(self):
"""Fetches new population data, and updates existing saved data."""
cur_year = arrow.now().year
raw_result = None
try:
raw_result = self._proxy.FetchJson(_DATA_URL, {
'format': 'json',
'source': '40',
'per_page': '500',
'date': cur_year
})
except Exception: # pylint: disable=broad-except
logging.exception('Failed to fetch population data')
if not raw_result or len(raw_result) < 2:
return
raw_result = raw_result[1]
ids_to_names = {}
populations = {}
for region_info in raw_result:
if 'country' not in region_info:
logging.warning('Got population entry with no country entry:\n%s',
region_info)
continue
if not region_info['value']:
logging.info('Got population entry with null value:\n%s', region_info)
continue
country_info = region_info['country']
ids_to_names[country_info['id']] = country_info['value']
populations[country_info['id']] = region_info['value']
self._ids_to_names.update(ids_to_names)
self._populations.update(populations)
logging.info('Populations updated')
def _NormalizeId(self, raw_region: Text) -> Optional[Text]:
"""Takes a user-provided region and tries to map it to a valid region ID."""
region = raw_region.upper().strip()
if region in self._ids_to_names:
return region
# Title-casing because most country names are title-cased.
region = region.title()
names_to_ids = {v: k for k, v in self._ids_to_names.items()}
if region in names_to_ids:
return names_to_ids[region]
# Finally, attempt to find a prefix match. For regions that could match
# multiple prefixes, the first one found is returned.
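# For example, 'wash' is neither a known id nor a full name, but it
# prefix-matches 'Washington' and therefore resolves to 'WA'.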
for name in names_to_ids:
if name.startswith(region):
return names_to_ids[name]
logging.info('Region "%s" unknown', raw_region)
return None
```
#### File: hypebot/proxies/proxy_lib.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import copy
from functools import partial
import json
from absl import logging
from six import with_metaclass
from hypebot.core import cache_lib
from hypebot.core import util_lib
class Proxy(with_metaclass(abc.ABCMeta)):
"""A class to proxy requests."""
_STORAGE_SUBKEY = 'fetch_results'
def __init__(self, store=None):
self._request_cache = cache_lib.LRUCache(256, max_age_secs=60 * 60)
self._store = store
def __repr__(self):
return '<%s.%s with %s>' % (
self.__class__.__module__, self.__class__.__name__, self._request_cache)
@abc.abstractmethod
def _GetUrl(self, url, params, headers=None):
"""Fetches data from a specified URL.
Args:
url: (string) URL to request data from
params: Dict of url GET params.
headers: Dict of custom headers.
Returns:
HTTP response body if it exists, otherwise None
"""
def FlushCache(self):
self._request_cache.Flush()
def RawFetch(self, key, action, validate_fn=None, force_lookup=False,
use_storage=False):
"""Do action, checking cache/storage first and updating key if required.
Actions are arbitrary functions that execute and return values. For example,
an action could be a function that makes an HTTP request and returns the
response body's JSON.
Args:
key: The key associated with the action to perform.
action: The action to perform if the result is not cached against the key.
validate_fn: Callable that takes the result of action and validates if it
should be cached. If not specified, any non-None return from action is
cached.
force_lookup: If this lookup should bypass the cache and storage lookup.
Note that valid results will still be saved to the cache/storage.
use_storage: If the results of this fetch should be written to a
persistent storage layer. Useful when the data is not expected to
change.
Returns:
The data or None if the action failed.
"""
logging.info('RawFetch for %s', key)
if not force_lookup:
return_data = self._request_cache.Get(key)
if return_data:
return return_data
logging.info('Cache miss for %s', key)
if use_storage and self._store:
try:
return_data = self._store.GetValue(key, self._STORAGE_SUBKEY)
if return_data:
return return_data
except Exception as e:
logging.error('Error fetching %s from storage: %s', key, e)
logging.info('Storage missing %s', key)
return_data = action()
if not validate_fn:
validate_fn = lambda x: True
if return_data and validate_fn(return_data):
self._request_cache.Put(key, return_data)
if use_storage and self._store:
try:
self._store.SetValue(key, self._STORAGE_SUBKEY, return_data)
except Exception as e:
logging.error('Error storing return_data: %s', e)
return return_data
def FetchJson(self, url, params=None, headers=None, force_lookup=False,
use_storage=False, fields_to_erase=None):
"""Returns a python-native version of a JSON response from url."""
try:
params = params or {}
action = partial(self._JsonAction, url, params, headers, fields_to_erase)
# By adding to params, we ensure that it gets added to the cache key.
# Make a copy to avoid sending in the actual request.
params = copy.copy(params)
params['_fields_to_erase'] = fields_to_erase
return json.loads(
self.HTTPFetch(url, params, headers, action, self._ValidateJson,
force_lookup, use_storage) or '{}')
except Exception as e: # pylint: disable=broad-except
self._LogError(url, params, exception=e)
return {}
def _JsonAction(self,
url,
params,
headers,
fields_to_erase=None):
"""Action function for fetching JSON.
This first fetches the data, parses it into a dict, and then filters and
re-encodes it as JSON so that downstream assumptions about the return data
being the raw string are maintained.
Fields are specified in full path via dot delimiter. If any field in the
path is a list it will operate on all elements.
Tries to fail gracefully if the path doesn't exist.
E.g., `players.bios` will remove copious amounts of spam from rito.
Args:
url: The url to fetch data from.
params: Data for URL query string.
headers: Headers for the HTTPRequest.
fields_to_erase: Optional list of fields to erase.
Returns:
JSON string.
"""
response = json.loads(self._GetUrl(url, params, headers) or '{}')
for path in fields_to_erase or []:
self._EraseField(response, path.split('.'))
return json.dumps(response)
def _EraseField(self, data, keys):
if not keys or keys[0] not in data:
return
# No more nested levels, go ahead and Erase that data.
if len(keys) == 1:
del data[keys[0]]
return
data = data[keys[0]]
if isinstance(data, list):
for datum in data:
self._EraseField(datum, keys[1:])
else:
self._EraseField(data, keys[1:])
def _ValidateJson(self, return_data):
"""Validates if return_data should be cached by looking for an error key."""
try:
obj = json.loads(return_data or '{}')
# Don't cache 200 replies with errors in the body
return 'error' not in obj
except Exception as e:
logging.error('Failed to decode json object:\nError: %s\nRaw data:%s', e,
return_data)
return False
def HTTPFetch(self,
url,
params=None,
headers=None,
action=None,
validate_fn=None,
force_lookup=False,
use_storage=False):
"""Fetch url, checking the cache/storage first and updating it if required.
Args:
url: The url to fetch data from.
params: Data for URL query string.
headers: Dictionary of headers for HTTPRequest.
action: The action to perform if the result is not cached against the key.
validate_fn: Function used to validate if the response should be cached.
force_lookup: If this lookup should bypass the cache and storage lookup.
Note that valid results will still be saved to the cache/storage.
use_storage: If the results of this fetch should be written to a
persistent storage layer. Useful when the data is not expected to
change.
Returns:
The data or None if the fetch failed.
"""
params = params or {}
if action is None:
action = partial(self._GetUrl, url, params, headers)
return self.RawFetch(
util_lib.SafeUrl(url, params),
action,
validate_fn,
force_lookup=force_lookup,
use_storage=use_storage)
def _LogError(self, url, params=None, error_code=None, exception=None):
"""Logs an error in a standardized format."""
safe_url = util_lib.SafeUrl(url, params)
logging.error('Fetch for %s failed', safe_url)
if error_code:
logging.error(' Error code %s', error_code)
if exception:
logging.exception(exception)
```
#### File: hypebot/stocks/iex_stock.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from typing import Dict, List, Text
from hypebot.core import params_lib
from hypebot.protos import stock_pb2
from hypebot.stocks import stock_lib
class IEXStock(stock_lib.StockLib):
"""Data provided for free by IEX."""
DEFAULT_PARAMS = params_lib.MergeParams(
stock_lib.StockLib.DEFAULT_PARAMS,
{
'base_url': 'https://cloud.iexapis.com/v1',
# Sign up for token at iexcloud.io
'token': None,
})
def __init__(self, params, proxy):
super(IEXStock, self).__init__(params)
self._proxy = proxy
def Quotes(self, symbols: List[Text]) -> Dict[Text, stock_pb2.Quote]:
"""See StockLib.Quotes for details."""
request_params = {
'symbols': ','.join(symbols),
'types': 'quote',
'displayPercent': 'true', # Keep string, not boolean.
'token': self._params.token,
}
response = self._proxy.FetchJson(
os.path.join(self._params.base_url, 'stock/market/batch'),
params=request_params,
force_lookup=True)
stock_info = {}
for symbol, data in response.items():
quote = data['quote']
stock = stock_pb2.Quote(
symbol=symbol,
open=quote.get('open', 0),
close=quote.get('previousClose', 0),
price=quote.get('latestPrice', 0))
# These fields may exist and be `null` in the JSON, so we set the default
# outside of `get()`.
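# For example, a null 'change' field comes back as None, so the `or` falls
# back to computing price - close manually.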
stock.change = quote.get('change') or stock.price - stock.close
stock.change_percent = quote.get('changePercent') or (
stock.change / (stock.close or 1) * 100)
realtime_price = quote.get('iexRealtimePrice')
if realtime_price and abs(realtime_price - stock.price) > 1e-4:
stock.extended_price = realtime_price
stock.extended_change = realtime_price - stock.price
stock.extended_change_percent = int(
float(stock.extended_change) / stock.price * 100 + 0.5)
if stock.price:
stock_info[symbol] = stock
# If it wasn't a stock symbol, try to look it up as a crypto.
for symbol in set(symbols) - set(stock_info):
response = self._proxy.FetchJson(
os.path.join(self._params.base_url, 'crypto', symbol, 'price'),
params={'token': self._params.token},
force_lookup=True)
if response:
stock_info[symbol] = stock_pb2.Quote(
symbol=symbol, price=float(response.get('price', 0)))
return stock_info
def History(self,
symbols: List[Text],
span: Text = '1m') -> Dict[Text, List[float]]:
"""See StockLib.History for details."""
request_params = {
'symbols': ','.join(symbols),
'types': 'chart',
'range': span,
'token': self._params.token,
}
response = self._proxy.FetchJson(
os.path.join(self._params.base_url, 'stock/market/batch'),
params=request_params,
force_lookup=True)
stock_info = {}
for symbol, data in response.items():
stock_info[symbol] = [day['close'] for day in data['chart']][-5:]
return stock_info
```
#### File: hypebot/riot/riot_api_server.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import concurrent
import os
from absl import app
from absl import flags
from absl import logging
from google.protobuf import json_format
import grpc
import requests
from hypebot.protos.riot.v4 import champion_mastery_pb2
from hypebot.protos.riot.v4 import champion_mastery_pb2_grpc
from hypebot.protos.riot.v4 import league_pb2
from hypebot.protos.riot.v4 import league_pb2_grpc
from hypebot.protos.riot.v4 import match_pb2
from hypebot.protos.riot.v4 import match_pb2_grpc
from hypebot.protos.riot.v4 import summoner_pb2
from hypebot.protos.riot.v4 import summoner_pb2_grpc
FLAGS = flags.FLAGS
flags.DEFINE_string('host', 'localhost', 'Which host to use.')
flags.DEFINE_integer('port', 50051, 'Which port to bind to.')
def _convert_metadata_to_dict(metadata):
metadata_dict = {}
for key, value in metadata:
metadata_dict[key] = value
return metadata_dict
def _call_riot(endpoint, params, message, metadata, body_transform=None):
"""Helper function to call rito API.
Args:
endpoint: relative path to endpoint within Riot API.
params: Additional params to pass to the web request.
message: Proto message into which to write response. Note: this is an actual
message object and not simply the type. E.g., match_pb2.Match() not
match_pb2.Match.
metadata: Invocation_metadata from gRPC.
body_transform: Optional function to apply to raw response body prior to
parsing. JSON supports lists as the base object in the response, but
protos do not, so we sometimes need to add a wrapper Dict around the
response.
Returns:
The input message with fields set based on the call.
Raises:
RuntimeError: If request fails.
"""
metadata = _convert_metadata_to_dict(metadata)
url = os.path.join(
'https://%s.api.riotgames.com' % metadata.get('platform-id', 'na1'),
endpoint)
headers = {'X-Riot-Token': metadata['api-key']}
response = requests.get(url, params=params, headers=headers)
if response.status_code != requests.codes.ok:
raise RuntimeError('Failed request for: %s' % url)
body = response.text
if body_transform:
body = body_transform(body)
return json_format.Parse(body, message, ignore_unknown_fields=True)
class ChampionMasteryService(
champion_mastery_pb2_grpc.ChampionMasteryServiceServicer):
"""Champion Mastery API."""
def ListChampionMasteries(self, request, context):
return _call_riot(
'lol/champion-mastery/v4/champion-masteries/by-summoner/%s' %
request.encrypted_summoner_id, {},
champion_mastery_pb2.ListChampionMasteriesResponse(),
context.invocation_metadata(),
body_transform=lambda x: '{"championMasteries": %s }' % x)
def GetChampionMastery(self, request, context):
endpoint = ('lol/champion-mastery/v4/champion-masteries/by-summoner/%s/'
'by-champion/%s' %
(request.encrypted_summoner_id, request.champion_id))
return _call_riot(endpoint, {}, champion_mastery_pb2.ChampionMastery(),
context.invocation_metadata())
def GetChampionMasteryScore(self, request, context):
return _call_riot(
'lol/champion-mastery/v4/scores/by-summoner/%s' %
request.encrypted_summoner_id, {},
champion_mastery_pb2.ChampionMasteryScore(),
context.invocation_metadata(),
body_transform=lambda x: '{"score": %s }' % x)
class MatchService(match_pb2_grpc.MatchServiceServicer):
"""Match API."""
def ListMatches(self, request, context):
params = {}
if request.queues:
params['queue'] = [int(q) for q in request.queues]
if request.seasons:
params['season'] = [int(s) for s in request.seasons]
if request.champions:
params['champions'] = request.champions
if request.begin_time_ms:
params['beginTime'] = request.begin_time_ms
params['endTime'] = request.end_time_ms
if request.begin_index:
params['beginIndex'] = request.begin_index
params['endIndex'] = request.end_index
return _call_riot(
'lol/match/v4/matchlists/by-account/%s' % request.encrypted_account_id,
params, match_pb2.ListMatchesResponse(), context.invocation_metadata())
def ListTournamentMatchIds(self, request, context):
return _call_riot(
'lol/match/v4/matches/by-tournament-code/%s/ids' %
request.tournament_code, {}, match_pb2.ListTournamentMatchIdsResponse(),
context.invocation_metadata())
def GetMatch(self, request, context):
endpoint = 'lol/match/v4/matches/%s' % request.game_id
if request.tournament_code:
endpoint += '/by-tournament-code/%s' % request.tournament_code
return _call_riot(endpoint, {}, match_pb2.Match(),
context.invocation_metadata())
class SummonerService(summoner_pb2_grpc.SummonerServiceServicer):
"""Summoner API."""
def GetSummoner(self, request, context):
endpoint = 'lol/summoner/v4/summoners'
key_type = request.WhichOneof('key')
if key_type == 'encrypted_summoner_id':
endpoint += '/%s' % request.encrypted_summoner_id
elif key_type == 'encrypted_account_id':
endpoint += '/by-account/%s' % request.encrypted_account_id
elif key_type == 'summoner_name':
endpoint += '/by-name/%s' % request.summoner_name
elif key_type == 'encrypted_puuid':
endpoint += '/by-puuid/%s' % request.encrypted_puuid
else:
raise ValueError('GetSummoner: no key specified')
return _call_riot(endpoint, {}, summoner_pb2.Summoner(),
context.invocation_metadata())
class LeagueService(league_pb2_grpc.LeagueServiceServicer):
"""League API."""
def ListLeaguePositions(self, request, context):
endpoint = ('lol/league/v4/entries/by-summoner/%s' %
request.encrypted_summoner_id)
return _call_riot(
endpoint, {},
league_pb2.ListLeaguePositionsResponse(),
context.invocation_metadata(),
body_transform=lambda x: '{"positions": %s }' % x)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
server = grpc.server(concurrent.futures.ThreadPoolExecutor(max_workers=10))
champion_mastery_pb2_grpc.add_ChampionMasteryServiceServicer_to_server(
ChampionMasteryService(), server)
league_pb2_grpc.add_LeagueServiceServicer_to_server(LeagueService(), server)
match_pb2_grpc.add_MatchServiceServicer_to_server(MatchService(), server)
summoner_pb2_grpc.add_SummonerServiceServicer_to_server(
SummonerService(), server)
authority = '%s:%s' % (FLAGS.host, FLAGS.port)
logging.info('Starting server at %s', authority)
server.add_insecure_port(authority)
server.start()
server.wait_for_termination()
if __name__ == '__main__':
app.run(main)
```
|
{
"source": "jelman/baracus",
"score": 2
}
|
#### File: baracus/baracus/utils.py
```python
import os
import subprocess
from subprocess import Popen, PIPE
import pandas as pd
import numpy as np
import statsmodels.api as sm
import nibabel as nb
def run(command, env={}, ignore_errors=False):
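# Runs `command` through a shell, streaming combined stdout/stderr line by
# line, and raises if the process exits non-zero unless ignore_errors is set.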
merged_env = os.environ.copy()  # copy so the caller's environment is not mutated
merged_env.update(env)
# DEBUG env triggers freesurfer to produce gigabytes of files
merged_env.pop('DEBUG', None)
process = Popen(command, stdout=PIPE, stderr=subprocess.STDOUT, shell=True, env=merged_env)
while True:
line = process.stdout.readline()
line = str(line, 'utf-8')[:-1]
print(line)
if line == '' and process.poll() is not None:
break
if process.returncode != 0 and not ignore_errors:
raise Exception("Non zero return code: %d" % process.returncode)
def run_fs_if_not_available(bids_dir, freesurfer_dir, subject_label, license_key, n_cpus, sessions=[], skip_missing=False):
freesurfer_subjects = []
if sessions:
# long
for session_label in sessions:
freesurfer_subjects.append("sub-{sub}_ses-{ses}".format(sub=subject_label, ses=session_label))
else:
# cross
freesurfer_subjects.append("sub-{sub}".format(sub=subject_label))
fs_missing = False
for fss in list(freesurfer_subjects):  # iterate over a copy so removal below is safe
if not os.path.exists(os.path.join(freesurfer_dir, fss, "scripts/recon-all.done")):
if skip_missing:
freesurfer_subjects.remove(fss)
else:
fs_missing = True
if fs_missing:
cmd = "run_freesurfer.py {in_dir} {out_dir} participant " \
"--hires_mode disable " \
"--participant_label {subject_label} " \
"--license_key {license_key} " \
"--n_cpus {n_cpus} --steps cross-sectional".format(in_dir=bids_dir,
out_dir=freesurfer_dir,
subject_label=subject_label,
license_key=license_key,
n_cpus=n_cpus)
print("Freesurfer for {} not found. Running recon-all: {}".format(subject_label, cmd))
run(cmd)
for fss in list(freesurfer_subjects):  # iterate over a copy so removal below is safe
aseg_file = os.path.join(freesurfer_dir, fss, "stats/aseg.stats")
if not os.path.isfile(aseg_file):
if skip_missing:
freesurfer_subjects.remove(fss)
else:
raise FileNotFoundError(aseg_file)
return freesurfer_subjects
def get_subjects_session(layout, participant_label, truly_longitudinal_study):
valid_subjects = layout.get_subjects(modality="anat", type="T1w")
freesurfer_subjects = []
if participant_label:
subjects_to_analyze = set(participant_label) & set(valid_subjects)
subjects_not_found = set(participant_label) - set(subjects_to_analyze)
if subjects_not_found:
raise Exception("Requested subjects not found or do not have required data: {}".format(subjects_not_found))
else:
subjects_to_analyze = valid_subjects
sessions_to_analyze = {}
for subject in subjects_to_analyze:
if truly_longitudinal_study:
sessions = layout.get_sessions(modality="anat", type="T1w", subject=subject)
sessions_to_analyze[subject] = sessions
for session in sessions:
freesurfer_subjects.append("sub-{sub}_ses-{ses}".format(sub=subject, ses=session))
else:
freesurfer_subjects.append("sub-{sub}".format(sub=subject))
return subjects_to_analyze, sessions_to_analyze, freesurfer_subjects
def get_residuals(X, Y):
if len(Y.shape) == 1:
Y = np.atleast_2d(Y).T
betah = np.linalg.pinv(X).dot(Y)
Yfitted = X.dot(betah)
resid = Y - Yfitted
return np.squeeze(betah[0] + resid.values)
def remove_confounds(data_files, confound_file):
data_df = pd.DataFrame.from_dict(data_files, orient='index')
confounds = pd.read_csv(confound_file)
confounds = confounds.set_index(confounds.columns[0])
if (confounds.index.isin(data_df.index)).all():
confounds = confounds.reindex(data_df.index)
else:
raise Exception("Subjects in confound file and subject directory do not match. Make sure subject ID is in first column of confound file.")
X = confounds
X = sm.add_constant(X)
all_surfs = ['lh_thickness_file', 'rh_thickness_file', 'lh_area_file', 'rh_area_file']
for surf in all_surfs:
filelist = data_df[surf]
allsub_surf = []
for f in filelist:
img = nb.load(f)
in_data = img.get_fdata().squeeze()
allsub_surf.append(in_data)
allsub_surf = pd.DataFrame(allsub_surf)
surf_resid = allsub_surf.transform(lambda y: get_residuals(X,y), axis=0)
for i, f in enumerate(filelist):
out_data = surf_resid.iloc[i,:].values
outimg = nb.freesurfer.mghformat.MGHImage(out_data.astype(np.float32), np.eye(4))
nb.save(outimg, f)
aseg_files = data_df['aseg_file']
allsub_aseg = pd.DataFrame()
for aseg_f in aseg_files:
aseg_df = pd.read_csv(aseg_f, index_col=0, delimiter='\t')
        allsub_aseg = pd.concat([allsub_aseg, aseg_df])  # DataFrame.append was removed in pandas 2.0
aseg_resid = allsub_aseg.transform(lambda y: get_residuals(X,y), axis=0)
for i, f in enumerate(aseg_files):
out_data = aseg_resid.iloc[[i]]
out_data.to_csv(f, sep='\t', index=True)
```
|
{
"source": "Jelmerb/ansible-workstation-common",
"score": 2
}
|
#### File: default/tests/test_default.py
```python
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# Documentation:
# - https://testinfra.readthedocs.io/en/latest/modules.html#host
def test_hosts_file(host):
f = host.file('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
def test_nginx_is_installed(host):
nginx = host.package('nginx')
assert nginx.is_installed
def test_nginx_config_test_is_positive(host):
nginx = host.run('nginx -t')
assert nginx.rc == 0
assert '[warn]' not in nginx.stderr
assert '[error]' not in nginx.stderr
```
|
{
"source": "jelmer/better-bencode",
"score": 2
}
|
#### File: better-bencode/better_bencode/_pure.py
```python
import sys
if sys.version_info[0] == 2:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
else:
from io import BytesIO as StringIO
if sys.version_info[0] == 2:
INTEGER_TYPES = (int, long)
BINARY_TYPES = (str, )
int_to_binary = lambda val: str(val)
else:
INTEGER_TYPES = (int,)
BINARY_TYPES = (bytes, )
int_to_binary = lambda val: bytes(str(val), 'ascii')
class BencodeValueError(ValueError):
pass
class BencodeTypeError(TypeError):
pass
def _dump_implementation(obj, write, path, cast):
""" dump()/dumps() implementation """
t = type(obj)
if id(obj) in path:
raise BencodeValueError('circular reference detected')
if t in INTEGER_TYPES:
write(b'i')
write(int_to_binary(obj))
write(b'e')
elif t in BINARY_TYPES:
write(int_to_binary(len(obj)))
write(b':')
write(obj)
elif t is list or (cast and issubclass(t, (list, tuple))):
write(b'l')
for item in obj:
_dump_implementation(item, write, path + [id(obj)], cast)
write(b'e')
elif t is dict:
write(b'd')
data = sorted(obj.items())
for key, val in data:
_dump_implementation(key, write, path + [id(obj)], cast)
_dump_implementation(val, write, path + [id(obj)], cast)
write(b'e')
elif cast and t is bool:
write(b'i')
write(int_to_binary(int(obj)))
write(b'e')
else:
raise BencodeTypeError(
'type %s is not Bencode serializable' % type(obj).__name__
)
def dump(obj, fp, cast=False):
"""Serialize ``obj`` as a Bencode formatted stream to ``fp``."""
_dump_implementation(obj, fp.write, [], cast)
def dumps(obj, cast=False):
"""Serialize ``obj`` to a Bencode formatted ``str``."""
fp = []
_dump_implementation(obj, fp.append, [], cast)
return b''.join(fp)
def _read_until(delimiter, read):
""" Read char by char until ``delimiter`` occurs. """
result = b''
ch = read(1)
if not ch:
raise BencodeValueError('unexpected end of data')
while ch != delimiter:
result += ch
ch = read(1)
if not ch:
raise BencodeValueError('unexpected end of data')
return result
def _load_implementation(read):
""" load()/loads() implementation """
first = read(1)
if first == b'e':
return StopIteration
elif first == b'i':
value = b''
ch = read(1)
while (b'0' <= ch <= b'9') or (ch == b'-'):
value += ch
ch = read(1)
if ch == b'' or (ch == b'e' and value in (b'', b'-')):
raise BencodeValueError('unexpected end of data')
if ch != b'e':
raise BencodeValueError('unexpected byte 0x%.2x' % ord(ch))
return int(value)
elif b'0' <= first <= b'9':
size = 0
while b'0' <= first <= b'9':
size = size * 10 + (ord(first) - ord('0'))
first = read(1)
if first == b'':
raise BencodeValueError('unexpected end of data')
if first != b':':
raise BencodeValueError('unexpected byte 0x%.2x' % ord(first))
data = read(size)
if len(data) != size:
raise BencodeValueError('unexpected end of data')
return data
elif first == b'l':
result = []
while True:
val = _load_implementation(read)
if val is StopIteration:
return result
result.append(val)
elif first == b'd':
result = {}
while True:
this = read(1)
if this == b'e':
return result
elif this == b'':
raise BencodeValueError('unexpected end of data')
elif not this.isdigit():
raise BencodeValueError('unexpected byte 0x%.2x' % ord(this))
size = int(this + _read_until(b':', read))
key = read(size)
val = _load_implementation(read)
result[key] = val
elif first == b'':
raise BencodeValueError('unexpected end of data')
else:
raise BencodeValueError('unexpected byte 0x%.2x' % ord(first))
def load(fp):
"""Deserialize ``fp`` to a Python object."""
return _load_implementation(fp.read)
def loads(data):
"""Deserialize ``s`` to a Python object."""
fp = StringIO(data)
return _load_implementation(fp.read)
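# Illustrative round trip with this pure-Python implementation:
#   dumps({b'foo': 42, b'bar': [1, b'baz']}) == b'd3:barli1e3:baze3:fooi42ee'
#   loads(b'd3:fooi42ee') == {b'foo': 42}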
```
#### File: better-bencode/tests/test_bencode.py
```python
import sys
import os.path
# remove top repository dir to avoid importing local code
sys.path = [
directory
for directory in sys.path
if not os.path.exists(os.path.join(directory, 'README.rst'))
]
if sys.version_info[0] == 2:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
else:
from io import BytesIO as StringIO
import pytest
import better_bencode._pure as pure
import better_bencode as auto
try:
import better_bencode._fast as fast
except ImportError as e:
print(e)
fast = None
MODULES = [module for module in [auto, fast, pure] if module is not None]
@pytest.mark.parametrize('module', MODULES)
def test_error_load(module):
assert hasattr(module, 'BencodeValueError')
assert issubclass(module.BencodeValueError, ValueError)
@pytest.mark.parametrize('module', MODULES)
def test_error_dump(module):
assert hasattr(module, 'BencodeTypeError')
assert issubclass(module.BencodeTypeError, TypeError)
TEST_DATA = [
(b'de', {}),
(b'le', []),
(b'i0e', 0),
(b'i42e', 42),
(b'i-42e', -42),
(b'i9223372036854775807e', 2**63-1),
(b'i-9223372036854775808e', -(2**63)),
(b'0:', b''),
(b'4:spam', b'spam'),
(b'l4:spami42ee', [b'spam', 42]),
(b'd3:fooi42ee', {b'foo': 42}),
(b'd3:bar4:spam3:fooi42ee', {b'bar': b'spam', b'foo': 42}),
(b'd1:ai1e1:bi2e1:ci3ee', {b'a': 1, b'b': 2, b'c': 3}),
(b'd1:a1:be', {b'a': b'b'}),
]
TESTS = [
(module,) + test
for module in MODULES
for test in TEST_DATA
]
@pytest.mark.parametrize(('module', 'binary', 'struct'), TESTS)
def test_loads(module, binary, struct):
assert module.loads(binary) == struct
@pytest.mark.parametrize(('module', 'binary', 'struct'), TESTS)
def test_load(module, binary, struct):
fp = StringIO(binary)
assert module.load(fp) == struct
@pytest.mark.parametrize(('module', 'binary', 'struct'), TESTS)
def test_dumps(module, binary, struct):
assert module.dumps(struct) == binary
@pytest.mark.parametrize(('module', 'binary', 'struct'), TESTS)
def test_dump(module, binary, struct):
fp = StringIO()
module.dump(struct, fp)
assert fp.getvalue() == binary
#####################################################################
# dump TypeError tests
TESTS_TYPEERROR = [
(module, test)
for module in MODULES
for test in [
u'', (), set(), frozenset(),
len, TypeError,
True, False, None, 1.0,
]
]
@pytest.mark.parametrize(('module', 'struct'), TESTS_TYPEERROR)
def test_dump_typeerror(module, struct):
with pytest.raises(TypeError) as excinfo:
fp = StringIO()
module.dump(struct, fp)
assert type(struct).__name__ in str(excinfo.value)
@pytest.mark.parametrize(('module', 'struct'), TESTS_TYPEERROR)
def test_dump_dumperror(module, struct):
with pytest.raises(module.BencodeTypeError) as excinfo:
fp = StringIO()
module.dump(struct, fp)
assert type(struct).__name__ in str(excinfo.value)
@pytest.mark.parametrize(('module', 'struct'), TESTS_TYPEERROR)
def test_dumps_typeerror(module, struct):
with pytest.raises(TypeError) as excinfo:
module.dumps(struct)
assert type(struct).__name__ in str(excinfo.value)
@pytest.mark.parametrize(('module', 'struct'), TESTS_TYPEERROR)
def test_dumps_dumperror(module, struct):
with pytest.raises(module.BencodeTypeError) as excinfo:
module.dumps(struct)
assert type(struct).__name__ in str(excinfo.value)
@pytest.mark.parametrize('module', MODULES)
def test_dumps_reference_list(module):
a = [[]]
a[0].append(a)
with pytest.raises(ValueError) as excinfo:
module.dumps(a)
assert str(excinfo.value) == 'circular reference detected'
@pytest.mark.parametrize('module', MODULES)
def test_dumps_reference_list_deep(module):
a = [[[[[[[[[[[[[[[]]]]]]]]]]]]]]]
a[0][0][0][0][0][0][0][0][0][0][0][0][0].append(a)
with pytest.raises(ValueError) as excinfo:
module.dumps(a)
assert str(excinfo.value) == 'circular reference detected'
@pytest.mark.parametrize('module', MODULES)
def test_dumps_reference_dict(module):
a = {b'a': {b'b': {}}}
a[b'a'][b'b'][b'c'] = a
with pytest.raises(ValueError) as excinfo:
module.dumps(a)
assert str(excinfo.value) == 'circular reference detected'
#####################################################################
# load ValueError tests
TESTS_VALUEERROR = [
(module, binary, msg)
for module in MODULES
for binary, msg in [
(b'<', 'unexpected byte 0x3c'),
(b' ', 'unexpected byte 0x20'),
(b'x', 'unexpected byte 0x78'),
(b'', 'unexpected end of data'),
(b'1', 'unexpected end of data'),
(b'1:', 'unexpected end of data'),
(b'1x', 'unexpected byte 0x78'),
(b'i', 'unexpected end of data'),
(b'ie', 'unexpected end of data'),
(b'i-e', 'unexpected end of data'),
(b'ixe', 'unexpected byte 0x78'),
(b'l', 'unexpected end of data'),
(b'lx', 'unexpected byte 0x78'),
(b'lxe', 'unexpected byte 0x78'),
(b'l1:a', 'unexpected end of data'),
(b'l1:ax', 'unexpected byte 0x78'),
(b'd', 'unexpected end of data'),
(b'dx', 'unexpected byte 0x78'),
(b'dxe', 'unexpected byte 0x78'),
(b'd1:a', 'unexpected end of data'),
(b'd1:ax', 'unexpected byte 0x78'),
(b'd1:a1:b', 'unexpected end of data'),
(b'd1:a1:bx', 'unexpected byte 0x78'),
]
]
@pytest.mark.parametrize(('module', 'binary', 'msg'), TESTS_VALUEERROR)
def test_load_valueerror(module, binary, msg):
with pytest.raises(ValueError) as excinfo:
fp = StringIO(binary)
module.load(fp)
assert str(excinfo.value) == msg
@pytest.mark.parametrize(('module', 'binary', 'msg'), TESTS_VALUEERROR)
def test_loads_valueerror(module, binary, msg):
with pytest.raises(ValueError) as excinfo:
module.loads(binary)
assert str(excinfo.value) == msg
#####################################################################
# docstrings
@pytest.mark.parametrize('module', MODULES)
def test_docstrings_dump(module):
assert module.dump.__doc__ == "Serialize ``obj`` as a Bencode formatted stream to ``fp``."
@pytest.mark.parametrize('module', MODULES)
def test_docstrings_dumps(module):
assert module.dumps.__doc__ == "Serialize ``obj`` to a Bencode formatted ``str``."
@pytest.mark.parametrize('module', MODULES)
def test_docstrings_load(module):
assert module.load.__doc__ == "Deserialize ``fp`` to a Python object."
@pytest.mark.parametrize('module', MODULES)
def test_docstrings_loads(module):
assert module.loads.__doc__ == "Deserialize ``s`` to a Python object."
#####################################################################
# cast
from collections import namedtuple
Point = namedtuple('Point', 'x y')
class MyList(list):
def __init__(self, *data):
list.__init__(self, data)
CAST_TEST_DATA = [
(False, b'i0e', 0),
(True, b'i1e', 1),
((), b'le', []),
((1,), b'li1ee', [1]),
(Point(-1, 1), b'li-1ei1ee', [-1, 1]),
(MyList(-1, 1), b'li-1ei1ee', [-1, 1]),
]
CAST_TESTS = [
(module,) + test
for module in MODULES
for test in CAST_TEST_DATA
]
@pytest.mark.parametrize(('module', 'indata', 'binary', 'outdata'), CAST_TESTS)
def test_cast_dumps_ok(module, indata, binary, outdata):
dumped = module.dumps(indata, cast=True)
assert dumped == binary
assert module.loads(dumped) == outdata
@pytest.mark.parametrize(('module', 'indata', 'binary', 'outdata'), CAST_TESTS)
def test_cast_dumps_error(module, indata, binary, outdata):
with pytest.raises(TypeError) as excinfo:
module.dumps(indata)
```
|
{
"source": "jelmer/borgbackup",
"score": 2
}
|
#### File: generic_files/tests/test_client_restore.py
```python
import os
import pytest
from testinfra.utils.ansible_runner import AnsibleRunner
testinfra_hosts = ["ansible://all:!borgbackup_servers"]
def test_client_sample_file(host):
sample = host.file("/root/sample.txt")
assert sample.is_file
@pytest.mark.parametrize('server', AnsibleRunner(os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('borgbackup_servers'))
def test_client_dir(host, server):
command = host.run("diff -s /root/sample.txt /root/restore/%s/root/sample.txt" % server)
assert command.rc == 0
assert "Files /root/sample.txt and /root/restore/%s/root/sample.txt are identical" % server in command.stdout
@pytest.mark.parametrize('server', AnsibleRunner(os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('borgbackup_servers'))
def test_client_verify(host, server):
vcommand = host.run("/root/restore.sh verify")
assert vcommand.rc == 0
    assert "verifying on %s" % server in vcommand.stdout
```
|
{
"source": "jelmer/datalad",
"score": 2
}
|
#### File: distribution/tests/test_dataset_config.py
```python
from ..dataset import Dataset
from os.path import join as opj
from nose.tools import ok_, eq_, assert_false, assert_equal, assert_true
from datalad.tests.utils import with_tree
from datalad.api import create
# Let's document any configuration option supported in this
# reference configuration
_config_file_content = """\
[datalad "dataset"]
id = nothing
"""
_dataset_config_template = {
'ds': {
'.datalad': {
'config': _config_file_content}}}
@with_tree(tree=_dataset_config_template)
def test_configuration_access(path):
ds = Dataset(opj(path, 'ds'))
# there is something prior creation
assert_true(ds.config is not None)
# creation must change the uuid setting
assert_equal(ds.config['datalad.dataset.id'], 'nothing')
# create resets this value and records the actual uuid
ds.create(force=True)
assert_equal(ds.config.get_value('datalad.dataset', 'id', default='nothing'), ds.id)
```
#### File: datalad/interface/run_procedure.py
```python
__docformat__ = 'restructuredtext'
import logging
from glob import iglob
from argparse import REMAINDER
import os
import os.path as op
import stat
from datalad import cfg
from datalad.interface.base import Interface
from datalad.interface.utils import eval_results
from datalad.interface.base import build_doc
from datalad.interface.results import get_status_dict
from datalad.distribution.dataset import Dataset
from datalad.distribution.dataset import require_dataset
from datalad.distribution.dataset import EnsureDataset
from datalad.support.constraints import EnsureNone
from datalad.support.param import Parameter
from datalad.distribution.dataset import datasetmethod
from datalad.support.exceptions import InsufficientArgumentsError
from datalad.utils import assure_list
# bound dataset methods
from datalad.interface.run import Run
lgr = logging.getLogger('datalad.interface.run_procedures')
def _get_file_match(dir, name='*'):
targets = (name, ('[!_]*.py'), ('[!_]*.sh'))
lgr.debug("Looking for procedure '%s' in '%s'", name, dir)
for target in targets:
for m in iglob(op.join(dir, target)):
m_bn = op.basename(m)
if name == '*' or m_bn == name or m_bn.startswith('{}.'.format(name)):
yield m
def _get_procedure_implementation(name='*', ds=None):
ds = ds if isinstance(ds, Dataset) else Dataset(ds) if ds else None
# 1. check dataset for procedure
if ds is not None and ds.is_installed():
# could be more than one
dirs = assure_list(ds.config.obtain('datalad.locations.dataset-procedures'))
for dir in dirs:
# TODO `get` dirs if necessary
for m in _get_file_match(op.join(ds.path, dir), name):
yield m
# 2. check system and user account for procedure
for loc in (cfg.obtain('datalad.locations.user-procedures'),
cfg.obtain('datalad.locations.system-procedures')):
for dir in assure_list(loc):
for m in _get_file_match(dir, name):
yield m
# 3. check extensions for procedure
# delay heavy import until here
from pkg_resources import iter_entry_points
from pkg_resources import resource_isdir
from pkg_resources import resource_filename
for entry_point in iter_entry_points('datalad.extensions'):
# use of '/' here is OK wrt to platform compatibility
if resource_isdir(entry_point.module_name, 'resources/procedures'):
for m in _get_file_match(
resource_filename(
entry_point.module_name,
'resources/procedures'),
name):
yield m
# 4. at last check datalad itself for procedure
for m in _get_file_match(
resource_filename('datalad', 'resources/procedures'),
name):
yield m
def _guess_exec(script_file):
# TODO check for exec permission and rely on interpreter
if os.stat(script_file).st_mode & stat.S_IEXEC:
return ('executable', u'"{script}" "{ds}" {args}')
elif script_file.endswith('.sh'):
return (u'bash_script', u'bash "{script}" "{ds}" {args}')
elif script_file.endswith('.py'):
return (u'python_script', u'python "{script}" "{ds}" {args}')
    else:
        # unknown procedure type; return a pair so callers can unpack it safely
        return None, None
@build_doc
class RunProcedure(Interface):
"""Run prepared procedures (DataLad scripts) on a dataset
*Concept*
A "procedure" is an algorithm with the purpose to process a dataset in a
particular way. Procedures can be useful in a wide range of scenarios,
like adjusting dataset configuration in a uniform fashion, populating
a dataset with particular content, or automating other routine tasks,
such as synchronizing dataset content with certain siblings.
Implementations of some procedures are shipped together with DataLad,
but additional procedures can be provided by 1) any DataLad extension,
2) any dataset, 3) a local user, or 4) a local system administrator.
DataLad will look for procedures in the following locations and order:
Directories identified by the configuration settings
- 'datalad.locations.dataset-procedures'
- 'datalad.locations.user-procedures' (determined by
appdirs.user_config_dir; defaults to '$HOME/.config/datalad/procedures'
on GNU/Linux systems)
- 'datalad.locations.system-procedures' (determined by
appdirs.site_config_dir; defaults to '/etc/xdg/datalad/procedures' on
GNU/Linux systems)
and subsequently in the 'resources/procedures/' directories of any
installed extension, and, lastly, of the DataLad installation itself.
Each configuration setting can occur multiple times to indicate multiple
directories to be searched. If a procedure matching a given name is found
(filename without a possible extension), the search is aborted and this
implementation will be executed. This makes it possible for individual
datasets, users, or machines to override externally provided procedures
(enabling the implementation of customizable processing "hooks").
*Procedure implementation*
A procedure can be any executable. Executables must have the appropriate
permissions and, in the case of a script, must contain an appropriate
"shebang" line. If a procedure is not executable, but its filename ends
with '.py', it is automatically executed by the 'python' interpreter
(whichever version is available in the present environment). Likewise,
procedure implementations ending on '.sh' are executed via 'bash'.
Procedures can implement any argument handling, but must be capable
of taking at least one positional argument (the absolute path to the
dataset they shall operate on).
*Customize other commands with procedures*
On execution of any commands, DataLad inspects two additional
configuration settings:
- 'datalad.<name>.proc-pre'
- 'datalad.<name>.proc-post'
where '<name>' is the name of a DataLad command. Using this mechanism
DataLad can be instructed to run one or more procedures before or
after the execution of a given command. For example, configuring
a set of metadata types in any newly created dataset can be achieved
via:
% datalad -c 'datalad.create.proc-post=cfg_metadatatypes xmp image' create -d myds
As procedures run on datasets, it is necessary to explicitly identify
the target dataset via the -d (--dataset) option.
"""
_params_ = dict(
spec=Parameter(
args=("spec",),
metavar='NAME [ARGS]',
nargs=REMAINDER,
doc="""Name and possibly additional arguments of the
to-be-executed procedure."""),
dataset=Parameter(
args=("-d", "--dataset"),
metavar="PATH",
doc="""specify the dataset to run the procedure on.
An attempt is made to identify the dataset based on the current
working directory.""",
constraints=EnsureDataset() | EnsureNone()),
discover=Parameter(
args=('--discover',),
action='store_true',
doc="""if given, all configured paths are searched for procedures
and one result record per discovered procedure is yielded, but
no procedure is executed"""),
)
@staticmethod
@datasetmethod(name='run_procedure')
@eval_results
def __call__(
spec=None,
dataset=None,
discover=False):
if not spec and not discover:
raise InsufficientArgumentsError('requires at least a procedure name')
ds = require_dataset(
dataset, check_installed=False,
purpose='run a procedure') if dataset else None
if discover:
reported = set()
for m in _get_procedure_implementation('*', ds=ds):
if m in reported:
continue
cmd_type, cmd_tmpl = _guess_exec(m)
res = get_status_dict(
action='run_procedure',
path=m,
type='file',
logger=lgr,
refds=ds.path if ds else None,
status='ok',
procedure_type=cmd_type,
procedure_callfmt=cmd_tmpl,
message=cmd_type)
reported.add(m)
yield res
return
if not isinstance(spec, (tuple, list)):
# maybe coming from config
import shlex
spec = shlex.split(spec)
name = spec[0]
args = spec[1:]
try:
            # get the first match and run with it
procedure_file = next(_get_procedure_implementation(name, ds=ds))
except StopIteration:
# TODO error result
raise ValueError("Cannot find procedure with name '%s'", name)
cmd_type, cmd_tmpl = _guess_exec(procedure_file)
if cmd_tmpl is None:
raise ValueError(
"No idea how to execute procedure %s. Missing 'execute' permissions?",
procedure_file)
cmd = cmd_tmpl.format(
script=procedure_file,
ds=ds.path if ds else '',
args=u' '.join(u'"{}"'.format(a) for a in args) if args else '')
lgr.debug('Attempt to run procedure {} as: {}'.format(
name,
cmd))
for r in Run.__call__(
cmd=cmd,
dataset=ds,
explicit=True,
inputs=None,
outputs=None,
# pass through here
on_failure='ignore',
):
yield r
```
#### File: datalad/support/external_versions.py
```python
import sys
from os import linesep
from six import string_types
from six import binary_type
from distutils.version import LooseVersion
from datalad.dochelpers import exc_str
from datalad.log import lgr
# import version helper from config to have only one implementation
# config needs this to avoid circular imports
from datalad.config import get_git_version as __get_git_version
from .exceptions import CommandError
__all__ = ['UnknownVersion', 'ExternalVersions', 'external_versions']
# To depict an unknown version, which can't be compared by mistake etc
class UnknownVersion:
"""For internal use
"""
def __str__(self):
return "UNKNOWN"
def __cmp__(self, other):
if other is self:
return 0
raise TypeError("UNKNOWN version is not comparable")
#
# Custom handlers
#
from datalad.cmd import Runner
from datalad.cmd import GitRunner
from datalad.support.exceptions import (
MissingExternalDependency,
OutdatedExternalDependency,
)
_runner = Runner()
_git_runner = GitRunner()
def _get_annex_version():
"""Return version of available git-annex"""
try:
return _runner.run('git annex version --raw'.split())[0]
except CommandError:
# fall back on method that could work with older installations
out, err = _runner.run(['git', 'annex', 'version'])
return out.split('\n')[0].split(':')[1].strip()
def _get_git_version():
"""Return version of git we use (might be bundled)"""
return __get_git_version(_git_runner)
def _get_system_git_version():
"""Return version of git available system-wide
Might be different from the one we are using, which might be
bundled with git-annex
"""
return __get_git_version(_runner)
def _get_system_ssh_version():
"""Return version of ssh available system-wide
Annex prior 20170302 was using bundled version, but now would use system one
if installed
"""
try:
out, err = _runner.run('ssh -V'.split(),
expect_fail=True, expect_stderr=True)
# apparently spits out to err but I wouldn't trust it blindly
if err.startswith('OpenSSH'):
out = err
assert out.startswith('OpenSSH') # that is the only one we care about atm
return out.split(' ', 1)[0].rstrip(',.').split('_')[1]
except CommandError as exc:
lgr.debug("Could not determine version of ssh available: %s", exc_str(exc))
return None
class ExternalVersions(object):
"""Helper to figure out/use versions of the externals (modules, cmdline tools, etc).
To avoid collision between names of python modules and command line tools,
prepend names for command line tools with `cmd:`.
It maintains a dictionary of `distuil.version.LooseVersion`s to make
comparisons easy. Note that even if version string conform the StrictVersion
"standard", LooseVersion will be used. If version can't be deduced for the
external, `UnknownVersion()` is assigned. If external is not present (can't
be imported, or custom check throws exception), None is returned without
storing it, so later call will re-evaluate fully.
"""
UNKNOWN = UnknownVersion()
CUSTOM = {
'cmd:annex': _get_annex_version,
'cmd:git': _get_git_version,
'cmd:system-git': _get_system_git_version,
'cmd:system-ssh': _get_system_ssh_version,
}
INTERESTING = (
'appdirs',
'boto',
'exifread',
'git',
'gitdb',
'humanize',
'iso8601',
'msgpack',
'mutagen',
'patool',
'requests',
'scrapy',
'six',
'wrapt',
)
def __init__(self):
self._versions = {}
@classmethod
def _deduce_version(klass, value):
version = None
# see if it is something containing a version
for attr in ('__version__', 'version'):
if hasattr(value, attr):
version = getattr(value, attr)
break
# try pkg_resources
if version is None and hasattr(value, '__name__'):
try:
import pkg_resources
version = pkg_resources.get_distribution(value.__name__).version
except Exception:
pass
# assume that value is the version
if version is None:
version = value
# do type analysis
if isinstance(version, (tuple, list)):
# Generate string representation
version = ".".join(str(x) for x in version)
elif isinstance(version, binary_type):
version = version.decode()
elif isinstance(version, string_types):
pass
else:
version = None
if version:
return LooseVersion(version)
else:
return klass.UNKNOWN
def __getitem__(self, module):
        # when run straight from the source tree -- fails to discover nipy's version. TODO
#if module == 'nipy':
# import pdb; pdb.set_trace()
if not isinstance(module, string_types):
modname = module.__name__
else:
modname = module
module = None
# Early returns None so we do not store prev result for them
# and allow users to install things at run time, so later check
# doesn't pick it up from the _versions
if modname not in self._versions:
version = None # by default -- not present
if modname in self.CUSTOM:
try:
version = self.CUSTOM[modname]()
version = self._deduce_version(version)
except Exception as exc:
lgr.debug("Failed to deduce version of %s due to %s"
% (modname, exc_str(exc)))
return None
else:
if module is None:
if modname not in sys.modules:
try:
module = __import__(modname)
except ImportError:
lgr.debug("Module %s seems to be not present" % modname)
return None
except Exception as exc:
lgr.warning("Failed to import module %s due to %s",
modname, exc_str(exc))
return None
else:
module = sys.modules[modname]
if module:
version = self._deduce_version(module)
self._versions[modname] = version
return self._versions.get(modname, self.UNKNOWN)
def keys(self):
"""Return names of the known modules"""
return self._versions.keys()
def __contains__(self, item):
return item in self._versions
@property
def versions(self):
"""Return dictionary (copy) of versions"""
return self._versions.copy()
def dumps(self, indent=None, preamble="Versions:", query=False):
"""Return listing of versions as a string
Parameters
----------
indent: bool or str, optional
If set would instruct on how to indent entries (if just True, ' '
is used). Otherwise returned in a single line
preamble: str, optional
What preamble to the listing to use
query : bool, optional
To query for versions of all "registered" custom externals, so to
get those which weren't queried for yet
"""
if query:
[self[k] for k in tuple(self.CUSTOM) + self.INTERESTING]
if indent and (indent is True):
indent = ' '
items = ["%s=%s" % (k, self._versions[k]) for k in sorted(self._versions)]
out = "%s" % preamble if preamble else ''
if indent is not None:
if preamble:
preamble += linesep
indent = ' ' if indent is True else str(indent)
out += (linesep + indent).join(items) + linesep
else:
out += " " + ' '.join(items)
return out
def check(self, name, min_version=None, msg=""):
"""Check if an external (optionally of specified min version) present
Parameters
----------
name: str
Name of the external (typically a Python module)
min_version: str or version, optional
Minimal version to satisfy
msg: str, optional
An additional message to include into the exception message
Raises
------
MissingExternalDependency
if the external is completely missing
OutdatedExternalDependency
if the external is present but does not satisfy the min_version
"""
ver_present = self[name]
if ver_present is None:
raise MissingExternalDependency(
name, ver=min_version, msg=msg)
elif min_version and ver_present < min_version:
raise OutdatedExternalDependency(
name, ver=min_version, ver_present=ver_present, msg=msg)
external_versions = ExternalVersions()
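# Illustrative usage of the module-level singleton:
#   external_versions['cmd:git']                         # version of the git in use, or None
#   external_versions.check('six', min_version='1.10')   # raises if missing or outdated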
```
#### File: datalad/support/keyring_.py
```python
import os
class Keyring(object):
"""Adapter to keyring module
It also delays import of keyring which takes 300ms I guess due to all plugins etc
"""
def __init__(self):
self.__keyring = None
@property
def _keyring(self):
if self.__keyring is None:
            # Set up logging for keyring if we are debugging, although keyring's logging
# is quite scarce ATM
from datalad.log import lgr
import logging
lgr_level = lgr.getEffectiveLevel()
if lgr_level < logging.DEBUG:
keyring_lgr = logging.getLogger('keyring')
keyring_lgr.setLevel(lgr_level)
keyring_lgr.handlers = lgr.handlers
lgr.debug("Importing keyring")
import keyring
self.__keyring = keyring
return self.__keyring
@classmethod
def _get_service_name(cls, name):
return "datalad-%s" % str(name)
# proxy few methods of interest explicitly, to be rebound to the module's
def get(self, name, field):
        # consult the keyring first, then fall back to the environment
val = self._keyring.get_password(self._get_service_name(name), field)
if val is None:
val = os.environ.get(('DATALAD_%s_%s' % (name, field)).replace('-', '_'), None)
return val
def set(self, name, field, value):
return self._keyring.set_password(self._get_service_name(name), field, value)
def delete(self, name, field=None):
if field is None:
raise NotImplementedError("Deletion of all fields associated with a name")
return self._keyring.delete_password(self._get_service_name(name), field)
class MemoryKeyring(object):
"""A simple keyring which just stores provided info in memory
Primarily for testing
"""
def __init__(self):
self.entries = {}
def get(self, name, field):
"""Get password from the specified service.
"""
# to mimic behavior of keyring module
return self.entries[name][field] \
if name in self.entries and field in self.entries[name] \
else None
def set(self, name, field, value):
"""Set password for the user in the specified service.
"""
self.entries.setdefault(name, {}).update({field: value})
def delete(self, name, field=None):
"""Delete password from the specified service.
"""
if name in self.entries:
if field:
self.entries[name].pop(field)
else:
# TODO: might be implemented by some super class if .keys() of some kind provided
self.entries.pop(name)
else:
raise KeyError("No entries associated with %s" % name)
keyring = Keyring()
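# Illustrative usage (values are stored under the "datalad-<name>" keyring service,
# with DATALAD_<name>_<field> environment variables as a read fallback):
#   keyring.set('github', 'token', 's3cret')
#   keyring.get('github', 'token')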
```
|
{
"source": "jelmerdejong/flask-email-login",
"score": 2
}
|
#### File: flask-email-login/app/models.py
```python
from time import time
from flask import current_app
import jwt
from . import db
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
class User(UserMixin, db.Model):
id = db.Column(db.Integer,
primary_key=True)
email = db.Column(db.String(320),
nullable=False,
unique=True)
created_on = db.Column(db.DateTime,
index=False,
unique=False,
nullable=True)
last_login = db.Column(db.DateTime,
index=False,
unique=False,
nullable=True)
def get_login_token(self, expires_in=600):
return jwt.encode(
{'login_token': self.id, 'exp': time() + expires_in},
current_app.config['SECRET_KEY'],
algorithm='HS256').decode('utf-8')
@staticmethod
def verify_login_token(token):
try:
id = jwt.decode(token, current_app.config['SECRET_KEY'],
algorithms=['HS256'])['login_token']
except:
return
return User.query.get(id)
def __repr__(self):
return '<User {}>'.format(self.email)
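# Illustrative login flow (assumes the app emails the token to the user as a link):
#   token = user.get_login_token()           # short-lived JWT, 10 minutes by default
#   user = User.verify_login_token(token)    # returns None if the token is invalid or expired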
```
|
{
"source": "jelmerdereus/simple-nettools",
"score": 3
}
|
#### File: simple-nettools/gcp_netblocks/gcp_netblocks.py
```python
import dns.resolver
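# The _cloud-netblocks TXT record is an SPF-style string such as
# "v=spf1 include:_cloud-netblocks1.googleusercontent.com ... ?all";
# each include: entry names another TXT record listing ip4:/ip6: CIDR blocks,
# which is what the string slicing below extracts.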
def main():
netblock_response = dns.resolver.query('_cloud-netblocks.googleusercontent.com', 'TXT').rrset
netblock_names = [rec[8:] for rec in str(netblock_response).split(' ')[5:-1]]
all_subnets = []
for name in netblock_names:
netblock_response = dns.resolver.query(name, 'TXT').rrset
subnets = [net[4:] for net in str(netblock_response).split(' ')[5:-1]]
all_subnets = all_subnets + subnets
print(all_subnets)
if __name__ == '__main__':
main()
```
|
{
"source": "Jelmerro/modmov",
"score": 3
}
|
#### File: Jelmerro/modmov/compress.py
```python
import argparse
import os
import util
def handle_movie(folder, movie, compression):
if not os.path.isdir(os.path.join(folder, "Compressed")):
os.mkdir(os.path.join(folder, "Compressed"))
util.cprint(f"Found movie: '{movie}'", "green")
com_movie = os.path.join(folder, "Compressed", movie)
movie = os.path.join(folder, movie)
util.run_command(
f'ffmpeg -i "{movie}" -map 0:v -map 0:a -map 0:s? -scodec copy -crf '
f'{compression} "{com_movie}"')
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Compress the bitrate of any mkv or mp4")
parser.add_argument(
"location", help="Location of the folder/file to process")
parser.add_argument(
"-c", "--compress", type=int, default=20, choices=range(0, 51),
help="Level of compression, 0=lossless, 50=garbage, 20=default")
args = parser.parse_args()
files = util.list_movies(args.location)
for f in files:
handle_movie(f["dir"], f["file"], args.compress)
if not files:
print("No movie files found in the specified directory")
print("Input files are expected to be: .mkv or .mp4")
print("All matched files will be compressed to reduce size")
```
#### File: Jelmerro/modmov/extract.py
```python
import argparse
import os
import re
import util
def handle_movie(folder, movie):
if not os.path.isdir(os.path.join(folder, "Streamable")):
os.mkdir(os.path.join(folder, "Streamable"))
util.cprint(f"Found movie: '{movie}'", "green")
movie_mp4 = os.path.join(
folder, "Streamable",
re.sub(r"(\.mkv|\.mp4)$", ".mp4", movie))
movie = os.path.join(folder, movie)
util.run_command(f'ffmpeg -i "{movie}" -c copy "{movie_mp4}"')
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Extract a streamable mp4 from any mkv or mp4")
parser.add_argument(
"location", help="Location of the folder/file to process")
args = parser.parse_args()
files = util.list_movies(args.location)
for f in files:
handle_movie(f["dir"], f["file"])
if not files:
print("No movie files found in the specified directory")
print("Input files are expected to be: .mkv or .mp4")
print("All matched files will be extracted to a single mp4 per movie")
```
|
{
"source": "Jelmerro/stagger",
"score": 2
}
|
#### File: stagger/stagger/conversion.py
```python
from warnings import warn
class Unsync:
"Conversion from/to unsynchronized byte sequences."
@staticmethod
def gen_decode(iterable):
"A generator for de-unsynchronizing a byte iterable."
sync = False
for b in iterable:
if sync and b & 0xE0:
warn("Invalid unsynched data", Warning)
if not (sync and b == 0x00):
yield b
sync = (b == 0xFF)
@staticmethod
def gen_encode(data):
"A generator for unsynchronizing a byte iterable."
sync = False
for b in data:
if sync and (b == 0x00 or b & 0xE0):
yield 0x00 # Insert sync char
yield b
sync = (b == 0xFF)
if sync:
yield 0x00 # Data ends on 0xFF
@staticmethod
def decode(data):
"Remove unsynchronization bytes from data."
return bytes(Unsync.gen_decode(data))
@staticmethod
def encode(data):
"Insert unsynchronization bytes into data."
return bytes(Unsync.gen_encode(data))
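# Illustrative behaviour: a 0x00 byte is inserted after every 0xFF on encode
# and removed again on decode.
#   Unsync.encode(b"\xff\xe0") == b"\xff\x00\xe0"
#   Unsync.decode(b"\xff\x00\xe0") == b"\xff\xe0"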
class UnsyncReader:
"Unsynchronized file reader."
def __init__(self, file):
self.file = file
self.gen = Unsync.gen_decode(self.__gen_readchar())
    def __gen_readchar(self):
        try:
            while True:
                yield self.file.read(1)[0]
        except (EOFError, IndexError):
            # indexing the result of an empty read raises IndexError at end of file
            pass
def read(self, n):
data = bytes(b for i, b in zip(range(n), self.gen))
if len(data) < n:
raise EOFError
return data
class Syncsafe:
"""Conversion to/from syncsafe integers.
Syncsafe integers are big-endian 7-bit byte sequences.
"""
@staticmethod
def decode(data):
"Decodes a syncsafe integer"
value = 0
for b in data:
if b > 127: # iTunes bug
raise ValueError("Invalid syncsafe integer")
value <<= 7
value += b
return value
@staticmethod
def encode(i, *, width=-1):
"""Encodes a nonnegative integer into syncsafe format
When width > 0, then len(result) == width
When width < 0, then len(result) >= abs(width)
"""
if i < 0:
raise ValueError("value is negative")
assert width != 0
data = bytearray()
while i:
data.append(i & 127)
i >>= 7
if width > 0 and len(data) > width:
raise ValueError("Integer too large")
if len(data) < abs(width):
data.extend([0] * (abs(width) - len(data)))
data.reverse()
return data
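# Illustrative use: 28-bit syncsafe size fields as found in ID3v2 headers.
#   Syncsafe.encode(383, width=4) == bytearray(b"\x00\x00\x02\x7f")
#   Syncsafe.decode(b"\x00\x00\x02\x7f") == 383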
class Int8:
"""Conversion to/from binary integer values of any length."""
@staticmethod
def decode(data):
"Decodes an 8-bit big-endian integer of any length"
value = 0
for b in data:
value <<= 8
value += b
return value
@staticmethod
def encode(i, *, width=-1):
"Encodes a positive integer to a big-endian bytearray of given length"
assert width != 0
if i is None:
i = 0
if i < 0:
raise ValueError("Nonnegative integer expected")
data = bytearray()
while i:
data.append(i & 255)
i >>= 8
if width > 0 and len(data) > width:
raise ValueError("Integer too large")
if len(data) < abs(width):
data.extend([0] * (abs(width) - len(data)))
return bytes(data[::-1])
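# Illustrative use: fixed-width big-endian integers.
#   Int8.encode(258, width=4) == b"\x00\x00\x01\x02"
#   Int8.decode(b"\x01\x02") == 258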
```
#### File: stagger/stagger/specs.py
```python
from abc import abstractmethod, ABCMeta
from collections.abc import ByteString, Sequence
from warnings import warn
from stagger.conversion import Int8
from stagger.errors import FrameError, FrameWarning
def optionalspec(spec):
spec._optional = True
return spec
class Spec(metaclass=ABCMeta):
def __init__(self, name):
self.name = name
_optional = False
@abstractmethod
def read(self, frame, data):
pass
@abstractmethod
def write(self, frame, value):
pass
def validate(self, frame, value):
self.write(frame, value)
return value
def to_str(self, value):
return str(value)
class ByteSpec(Spec):
def read(self, frame, data):
if len(data) < 1:
raise EOFError()
return data[0], data[1:]
def write(self, frame, value):
return bytes([value])
def validate(self, frame, value):
if value is None:
return value
if not isinstance(value, int):
raise TypeError("Not a byte")
if value not in range(256):
raise ValueError("Invalid byte value")
return value
class IntegerSpec(Spec):
"""An 8-bit, big-endian unsigned integer of specified width.
Width is the number of bits in the representation.
    If width is a string, it must name a frame attribute to get the
width from.
The width is automatically rounded up to the nearest multiple of 8.
"""
def __init__(self, name, width):
super().__init__(name)
self.width = width
def _width(self, frame):
if isinstance(self.width, str):
return (getattr(frame, self.width) + 7) // 8
else:
return (self.width + 7) // 8
def read(self, frame, data):
w = self._width(frame)
if len(data) < w:
raise EOFError()
return Int8.decode(data[:w]), data[w:]
def write(self, frame, value):
return Int8.encode(value, width=self._width(frame))
def validate(self, frame, value):
if value is None:
return value
if type(value) is not int:
raise TypeError("Not an integer: {0}".format(repr(value)))
w = self._width(frame)
if value < 0:
raise ValueError("Value is negative")
if value >= 1 << (w << 3):
raise ValueError("Value is too large")
return value
class SignedIntegerSpec(IntegerSpec):
"""An 8-bit, big-endian two's-complement signed integer of specified width.
Width is the number of bits in the representation.
    If width is a string, it must name a frame attribute to get the
width from.
The width is automatically rounded up to the nearest multiple of 8.
"""
def __init__(self, name, width):
super().__init__(name, width=width)
def read(self, frame, data):
w = self._width(frame)
(value, data) = super().read(frame, data)
if value & (1 << ((w << 3) - 1)): # Negative value
value -= (1 << (w << 3))
return value, data
def write(self, frame, value):
w = self._width(frame)
if value < 0:
value += (1 << (w << 3))
return super().write(frame, value)
def validate(self, frame, value):
if value is None:
return value
if type(value) is not int:
raise TypeError("Not an integer")
w = self._width(frame)
if value >= (1 << ((w << 3) - 1)):
raise ValueError("Value is too large")
if value < -(1 << ((w << 3) - 1)):
raise ValueError("Value is too small")
return value
class RVADIntegerSpec(IntegerSpec):
"""An 8-bit, big-endian signed integer in RVAD format.
The value is stored in sign + magnitude format,
with the sign bit encoded in bit <signbit> of the
frame's <signs> attribute. A zero sign bit indicates
the value is negative.
"""
def __init__(self, name, width, signbit, signs="signs"):
super().__init__(name, width)
self.signbit = signbit
self.signs = signs
def read(self, frame, data):
(value, data) = super().read(frame, data)
if not getattr(frame, self.signs) & (1 << self.signbit):
value *= -1
return (value, data)
def write(self, frame, value):
return super().write(frame, abs(value))
def validate(self, frame, value):
if value is None:
return value
if type(value) is not int:
raise TypeError("Not an integer: {0}".format(repr(value)))
# Update sign bit in frame.signs.
signs = getattr(frame, self.signs)
if value < 0:
signs &= ~(1 << self.signbit)
else:
signs |= 1 << self.signbit
setattr(frame, self.signs, signs)
w = self._width(frame)
if abs(value) >= 1 << (w << 3):
raise ValueError("Value is too large")
return value
class VarIntSpec(Spec):
def read(self, frame, data):
if len(data) == 0:
raise EOFError()
bits = data[0]
data = data[1:]
bytecount = (bits + 7) >> 3
if len(data) < bytecount:
raise EOFError()
return Int8.decode(data[:bytecount]), data[bytecount:]
def write(self, frame, value):
bytecount = 4
t = value >> 32
while t > 0:
t >>= 32
bytecount += 4
return Int8.encode(bytecount * 8, width=1) + Int8.encode(
value, width=bytecount)
def validate(self, frame, value):
if value is None:
return value
if type(value) is not int:
raise TypeError("Not an integer")
if value < 0:
raise ValueError("Value is negative")
return value
class BinaryDataSpec(Spec):
def read(self, frame, data):
return data, bytes()
def write(self, frame, value):
return bytes(value)
def validate(self, frame, value):
if value is None:
return bytes()
if not isinstance(value, ByteString):
raise TypeError("Not a byte sequence")
return value
def to_str(self, value):
return "{0}{1}".format(value[0:16], "..." if len(value) > 16 else "")
class SimpleStringSpec(Spec):
def __init__(self, name, length):
super().__init__(name)
self.length = length
def read(self, frame, data):
return data[:self.length].decode("latin-1"), data[self.length:]
def write(self, frame, value):
if value is None:
return b" " * self.length
data = value.encode("latin-1")
if len(data) != self.length:
raise ValueError("String length mismatch")
return data
def validate(self, frame, value):
if value is None:
return None
if not isinstance(value, str):
raise TypeError("Not a string")
if len(value) != self.length:
raise ValueError("String length mismatch")
value.encode("latin-1")
return value
class LanguageSpec(SimpleStringSpec):
def __init__(self, name):
super().__init__(name, 3)
class NullTerminatedStringSpec(Spec):
def read(self, frame, data):
rawstr, _sep, data = data.partition(b"\x00")
return rawstr.decode('latin-1'), data
def write(self, frame, value):
return value.encode('latin-1') + b"\x00"
def validate(self, frame, value):
if value is None:
return ""
if not isinstance(value, str):
raise TypeError("Not a string")
value.encode('latin-1')
return value
class URLStringSpec(NullTerminatedStringSpec):
def read(self, frame, data):
rawstr, _sep, data = data.partition(b"\x00")
if len(rawstr) == 0 and len(data) > 0:
rawstr, _sep, data = data.partition(b"\x00")
return rawstr.decode('latin-1'), data
class EncodingSpec(ByteSpec):
"EncodingSpec must be the first spec."
def read(self, frame, data):
enc, data = super().read(frame, data)
if enc & 0xFC:
raise FrameError("Invalid encoding")
return enc, data
def validate(self, frame, value):
if value is None:
return value
def norm(s):
return s.lower().replace("-", "")
if isinstance(value, str):
for i in range(len(EncodedStringSpec._encodings)):
if norm(EncodedStringSpec._encodings[i][0]) == norm(value):
value = i
break
else:
raise ValueError("Unknown encoding: " + repr(value))
if not isinstance(value, int):
raise TypeError("Not an encoding")
if 0 <= value <= 3:
return value
raise ValueError("Invalid encoding 0x{0:X}".format(value))
def to_str(self, value):
if value is None:
return ""
return EncodedStringSpec._encodings[value][0]
class EncodedStringSpec(Spec):
_encodings = (('latin-1', b"\x00"),
('utf-16', b"\x00\x00"),
('utf-16-be', b"\x00\x00"),
('utf-8', b"\x00"))
def read(self, frame, data):
enc, term = self._encodings[frame.encoding]
if len(term) == 1:
rawstr, _sep, data = data.partition(term)
else:
index = len(data)
for i in range(0, len(data), 2):
if data[i:i+2] == term:
index = i
break
if index & 1:
raise EOFError()
rawstr = data[:index]
data = data[index+2:]
return rawstr.decode(enc), data
def write(self, frame, value):
assert frame.encoding is not None
enc, term = self._encodings[frame.encoding]
return value.encode(enc) + term
def validate(self, frame, value):
if value is None:
return ""
if not isinstance(value, str):
raise TypeError("Not a string")
if frame.encoding is not None:
self.write(frame, value)
return value
class EncodedFullTextSpec(EncodedStringSpec):
pass
class SequenceSpec(Spec):
"""Recognizes a sequence of values, all of the same spec."""
def __init__(self, name, spec):
super().__init__(name)
self.spec = spec
def read(self, frame, data):
"Returns a list of values, eats all of data."
seq = []
while data:
elem, data = self.spec.read(frame, data)
seq.append(elem)
return seq, data
def write(self, frame, value):
if isinstance(value, str):
return self.spec.write(frame, value)
data = bytearray()
for v in value:
data.extend(self.spec.write(frame, v))
return data
def validate(self, frame, value):
if value is None:
return []
if isinstance(value, str):
value = [value]
return [self.spec.validate(frame, v) for v in value]
class MultiSpec(Spec):
def __init__(self, name, *specs):
super().__init__(name)
self.specs = specs
def read(self, frame, data):
seq = []
while data:
record = []
origdata = data
try:
for s in self.specs:
elem, data = s.read(frame, data)
record.append(elem)
seq.append(tuple(record))
except (EOFError, ValueError):
if len(seq) == 0:
raise
warn("Frame {0} has {1} bytes of junk at end".format(
frame.frameid, len(origdata)), FrameWarning)
frame.junkdata = origdata
data = b''
return seq, data
def write(self, frame, value):
data = bytearray()
for v in value:
for i in range(len(self.specs)):
data.extend(self.specs[i].write(frame, v[i]))
return bytes(data)
def validate(self, frame, value):
if value is None:
return []
res = []
for v in value:
if not isinstance(v, Sequence) or isinstance(v, str):
raise TypeError("Records must be sequences")
if len(v) != len(self.specs):
raise ValueError("Invalid record length")
res.append(tuple(self.specs[i].validate(frame, v[i])
for i in range(len(self.specs))))
return res
class ASPISpec(Spec):
"A list of frame.N integers whose width depends on frame.b."
def read(self, frame, data):
width = 1 if frame.b == 1 else 2
value = []
if len(data) < width * frame.N:
raise EOFError
for _ in range(frame.N):
value.append(Int8.decode(data[:width]))
data = data[width:]
return value, data
def write(self, frame, value):
width = 1 if frame.b == 1 else 2
data = bytearray()
for v in value:
data.extend(Int8.encode(v, width=width))
return data
def validate(self, frame, value):
if value is None:
return []
if not isinstance(value, Sequence) or isinstance(value, str):
raise TypeError("ASPISpec needs a sequence of integers")
if len(value) != frame.N:
raise ValueError("ASPISpec needs {0} integers".format(frame.N))
self.write(frame, value)
res = []
for v in value:
res.append(v)
return res
class PictureTypeSpec(ByteSpec):
picture_types = (
"Other", "32x32 icon", "Other icon", "Front Cover", "Back Cover",
"Leaflet", "Media", "Lead artist", "Artist", "Conductor",
"Band/Orchestra", "Composer", "Lyricist/text writer",
"Recording Location", "Recording", "Performance", "Screen capture",
"A bright coloured fish", "Illustration", "Band/artist",
"Publisher/Studio")
def validate(self, frame, value):
if value is None:
return value
if isinstance(value, str):
def matches(value, name):
return value.lower() == name.lower()
for i in range(len(self.picture_types)):
if matches(value, self.picture_types[i]):
value = i
break
else:
raise ValueError("Unknown picture type: " + repr(value))
if not isinstance(value, int):
raise TypeError("Not a picture type")
if 0 <= value < len(self.picture_types):
return value
raise ValueError("Unknown picture type 0x{0:X}".format(value))
def to_str(self, value):
return "{1}({0})".format(value, self.picture_types[value])
```
#### File: stagger/test/friendly.py
```python
import unittest
import os.path
import warnings
from stagger.tags import Tag22, Tag23, Tag24
from stagger.id3 import TT2, TIT1, TIT2, TPE1, TPE2, TALB, TCOM, TCON, TSOT
from stagger.id3 import TSOP, TSO2, TSOA, TSOC, TRCK, TPOS, TYE, TDA, TIM, TYER
from stagger.id3 import TDAT, TIME, TDRC, PIC, APIC, COM, COMM
from stagger.errors import Warning
class FriendlyTestCase(unittest.TestCase):
def testTitle22(self):
tag = Tag22()
tag[TT2] = "Foobar"
self.assertEqual(tag.title, "Foobar")
tag[TT2] = ("Foo", "Bar")
self.assertEqual(tag.title, "Foo / Bar")
tag.title = "Baz"
self.assertEqual(tag[TT2], TT2(text=["Baz"]))
self.assertEqual(tag.title, "Baz")
tag.title = "Quux / Xyzzy"
self.assertEqual(tag[TT2], TT2(text=["Quux", "Xyzzy"]))
self.assertEqual(tag.title, "Quux / Xyzzy")
def testTitle(self):
for tagcls in Tag23, Tag24:
tag = tagcls()
tag[TIT2] = "Foobar"
self.assertEqual(tag.title, "Foobar")
tag[TIT2] = ("Foo", "Bar")
self.assertEqual(tag.title, "Foo / Bar")
tag.title = "Baz"
self.assertEqual(tag[TIT2], TIT2(text=["Baz"]))
self.assertEqual(tag.title, "Baz")
tag.title = "Quux / Xyzzy"
self.assertEqual(tag[TIT2], TIT2(text=["Quux", "Xyzzy"]))
self.assertEqual(tag.title, "Quux / Xyzzy")
def testTextFrames(self):
for tagcls in Tag22, Tag23, Tag24:
tag = tagcls()
for attr, frame in (("title", TIT2),
("artist", TPE1),
("album_artist", TPE2),
("album", TALB),
("composer", TCOM),
("genre", TCON),
("grouping", TIT1),
("sort_title", TSOT),
("sort_artist", TSOP),
("sort_album_artist", TSO2),
("sort_album", TSOA),
("sort_composer", TSOC)):
if tagcls == Tag22:
frame = frame._v2_frame
# No frame -> empty string
self.assertEqual(getattr(tag, attr), "")
# Set by frameid, check via friendly name
tag[frame] = "Foobar"
self.assertEqual(getattr(tag, attr), "Foobar")
tag[frame] = ("Foo", "Bar")
self.assertEqual(getattr(tag, attr), "Foo / Bar")
# Set by friendly name, check via frame id
setattr(tag, attr, "Baz")
self.assertEqual(getattr(tag, attr), "Baz")
self.assertEqual(tag[frame], frame(text=["Baz"]))
setattr(tag, attr, "Quux / Xyzzy")
self.assertEqual(getattr(tag, attr), "Quux / Xyzzy")
self.assertEqual(tag[frame], frame(text=["Quux", "Xyzzy"]))
# Set to empty string, check frame is gone
setattr(tag, attr, "")
self.assertTrue(frame not in tag)
# Repeat, should not throw KeyError
setattr(tag, attr, "")
self.assertTrue(frame not in tag)
def testTrackFrames(self):
for tagcls in Tag22, Tag23, Tag24:
tag = tagcls()
for track, total, frame in (("track", "track_total", TRCK),
("disc", "disc_total", TPOS)):
if tagcls == Tag22:
frame = frame._v2_frame
# No frame -> zero values
self.assertEqual(getattr(tag, track), 0)
self.assertEqual(getattr(tag, total), 0)
# Set by frameid, check via friendly name
tag[frame] = "12"
self.assertEqual(getattr(tag, track), 12)
self.assertEqual(getattr(tag, total), 0)
tag[frame] = "12/24"
self.assertEqual(getattr(tag, track), 12)
self.assertEqual(getattr(tag, total), 24)
tag[frame] = "Foobar"
self.assertEqual(getattr(tag, track), 0)
self.assertEqual(getattr(tag, total), 0)
# Set by friendly name, check via frame id
setattr(tag, track, 7)
self.assertEqual(getattr(tag, track), 7)
self.assertEqual(getattr(tag, total), 0)
self.assertEqual(tag[frame], frame(text=["7"]))
setattr(tag, total, 21)
self.assertEqual(getattr(tag, track), 7)
self.assertEqual(getattr(tag, total), 21)
self.assertEqual(tag[frame], frame(text=["7/21"]))
# Set to 0/0, check frame is gone
setattr(tag, total, 0)
self.assertEqual(getattr(tag, track), 7)
self.assertEqual(getattr(tag, total), 0)
self.assertEqual(tag[frame], frame(text=["7"]))
setattr(tag, track, 0)
self.assertEqual(getattr(tag, track), 0)
self.assertEqual(getattr(tag, total), 0)
self.assertTrue(frame not in tag)
# Repeat, should not throw
setattr(tag, track, 0)
setattr(tag, total, 0)
self.assertTrue(frame not in tag)
# Set just the total
setattr(tag, total, 13)
self.assertEqual(tag[frame], frame(text=["0/13"]))
def testDate22_23(self):
for tagcls, yearframe, dateframe, timeframe in (
(Tag22, TYE, TDA, TIM), (Tag23, TYER, TDAT, TIME)
):
tag = tagcls()
# Check empty
self.assertEqual(tag.date, "")
# Set to empty
tag.date = ""
self.assertEqual(tag.date, "")
# Set a year
tag.date = "2009"
self.assertEqual(tag.date, "2009")
tag.date = " 2009 "
self.assertEqual(tag.date, "2009")
self.assertEqual(tag[yearframe], yearframe("2009"))
self.assertTrue(dateframe not in tag)
self.assertTrue(timeframe not in tag)
# Partial date
tag.date = "2009-07"
self.assertEqual(tag.date, "2009")
self.assertEqual(tag[yearframe], yearframe("2009"))
self.assertTrue(dateframe not in tag)
self.assertTrue(timeframe not in tag)
# Full date
tag.date = "2009-07-12"
self.assertEqual(tag.date, "2009-07-12")
self.assertEqual(tag[yearframe], yearframe("2009"))
self.assertEqual(tag[dateframe], dateframe("0712"))
self.assertTrue(timeframe not in tag)
# Date + time
tag.date = "2009-07-12 18:01"
self.assertEqual(tag.date, "2009-07-12 18:01")
self.assertEqual(tag[yearframe], yearframe("2009"))
self.assertEqual(tag[dateframe], dateframe("0712"))
self.assertEqual(tag[timeframe], timeframe("1801"))
tag.date = "2009-07-12 18:01:23"
self.assertEqual(tag.date, "2009-07-12 18:01")
self.assertEqual(tag[yearframe], yearframe("2009"))
self.assertEqual(tag[dateframe], dateframe("0712"))
self.assertEqual(tag[timeframe], timeframe("1801"))
tag.date = "2009-07-12T18:01:23"
self.assertEqual(tag.date, "2009-07-12 18:01")
self.assertEqual(tag[yearframe], yearframe("2009"))
self.assertEqual(tag[dateframe], dateframe("0712"))
self.assertEqual(tag[timeframe], timeframe("1801"))
# Truncate to year only
tag.date = "2009"
self.assertEqual(tag[yearframe], yearframe("2009"))
self.assertTrue(dateframe not in tag)
self.assertTrue(timeframe not in tag)
def testDate24(self):
tag = Tag24()
# Check empty
self.assertEqual(tag.date, "")
# Set to empty
tag.date = ""
self.assertEqual(tag.date, "")
# Set a year
tag.date = "2009"
self.assertEqual(tag.date, "2009")
self.assertEqual(tag[TDRC], TDRC(tag.date))
tag.date = " 2009 "
self.assertEqual(tag.date, "2009")
self.assertEqual(tag[TDRC], TDRC(tag.date))
tag.date = "2009-07"
self.assertEqual(tag.date, "2009-07")
self.assertEqual(tag[TDRC], TDRC(tag.date))
tag.date = "2009-07-12"
self.assertEqual(tag.date, "2009-07-12")
self.assertEqual(tag[TDRC], TDRC(tag.date))
tag.date = "2009-07-12 18:01"
self.assertEqual(tag.date, "2009-07-12 18:01")
self.assertEqual(tag[TDRC], TDRC(tag.date))
tag.date = "2009-07-12 18:01:23"
self.assertEqual(tag.date, "2009-07-12 18:01:23")
self.assertEqual(tag[TDRC], TDRC(tag.date))
tag.date = "2009-07-12T18:01:23"
self.assertEqual(tag.date, "2009-07-12 18:01:23")
self.assertEqual(tag[TDRC], TDRC(tag.date))
def testPicture22(self):
tag = Tag22()
# Check empty
self.assertEqual(tag.picture, "")
# Set to empty
tag.picture = ""
self.assertEqual(tag.picture, "")
self.assertTrue(PIC not in tag)
tag.picture = os.path.join(os.path.dirname(
__file__), "samples", "cover.jpg")
self.assertEqual(tag[PIC][0].type, 0)
self.assertEqual(tag[PIC][0].desc, "")
self.assertEqual(tag[PIC][0].format, "JPG")
self.assertEqual(len(tag[PIC][0].data), 60511)
self.assertEqual(tag.picture, "Other(0)::<60511 bytes of jpeg data>")
# Set to empty
tag.picture = ""
self.assertEqual(tag.picture, "")
self.assertTrue(PIC not in tag)
def testPicture23_24(self):
for tagcls in Tag23, Tag24:
tag = tagcls()
# Check empty
self.assertEqual(tag.picture, "")
# Set to empty
tag.picture = ""
self.assertEqual(tag.picture, "")
self.assertTrue(APIC not in tag)
# Set picture.
tag.picture = os.path.join(os.path.dirname(
__file__), "samples", "cover.jpg")
self.assertEqual(tag[APIC][0].type, 0)
self.assertEqual(tag[APIC][0].desc, "")
self.assertEqual(tag[APIC][0].mime, "image/jpeg")
self.assertEqual(len(tag[APIC][0].data), 60511)
self.assertEqual(
tag.picture, "Other(0)::<60511 bytes of jpeg data>")
# Set to empty
tag.picture = ""
self.assertEqual(tag.picture, "")
self.assertTrue(APIC not in tag)
def testComment(self):
for tagcls, frameid in ((Tag22, COM), (Tag23, COMM), (Tag24, COMM)):
tag = tagcls()
# Comment should be the empty string in an empty tag.
self.assertEqual(tag.comment, "")
# Try to delete non-existent comment.
tag.comment = ""
self.assertEqual(tag.comment, "")
self.assertTrue(frameid not in tag)
# Set comment.
tag.comment = "Foobar"
self.assertEqual(tag.comment, "Foobar")
self.assertTrue(frameid in tag)
self.assertEqual(len(tag[frameid]), 1)
self.assertEqual(tag[frameid][0].lang, "eng")
self.assertEqual(tag[frameid][0].desc, "")
self.assertEqual(tag[frameid][0].text, "Foobar")
# Override comment.
tag.comment = "Baz"
self.assertEqual(tag.comment, "Baz")
self.assertTrue(frameid in tag)
self.assertEqual(len(tag[frameid]), 1)
self.assertEqual(tag[frameid][0].lang, "eng")
self.assertEqual(tag[frameid][0].desc, "")
self.assertEqual(tag[frameid][0].text, "Baz")
# Delete comment.
tag.comment = ""
self.assertEqual(tag.comment, "")
self.assertTrue(frameid not in tag)
def testCommentWithExtraFrame(self):
"Test getting/setting the comment when other comments are present."
for tagcls, frameid in ((Tag22, COM), (Tag23, COMM), (Tag24, COMM)):
tag = tagcls()
            frame = frameid(lang="eng", desc="foo", text="This is a text")
tag[frameid] = [frame]
# Comment should be the empty string.
self.assertEqual(tag.comment, "")
# Try to delete non-existent comment.
tag.comment = ""
self.assertEqual(tag.comment, "")
self.assertEqual(len(tag[frameid]), 1)
# Set comment.
tag.comment = "Foobar"
self.assertEqual(tag.comment, "Foobar")
self.assertEqual(len(tag[frameid]), 2)
self.assertEqual(tag[frameid][0], frame)
self.assertEqual(tag[frameid][1].lang, "eng")
self.assertEqual(tag[frameid][1].desc, "")
self.assertEqual(tag[frameid][1].text, "Foobar")
# Override comment.
tag.comment = "Baz"
self.assertEqual(tag.comment, "Baz")
self.assertEqual(len(tag[frameid]), 2)
self.assertEqual(tag[frameid][0], frame)
self.assertEqual(tag[frameid][1].lang, "eng")
self.assertEqual(tag[frameid][1].desc, "")
self.assertEqual(tag[frameid][1].text, "Baz")
# Delete comment.
tag.comment = ""
self.assertEqual(tag.comment, "")
self.assertEqual(len(tag[frameid]), 1)
self.assertEqual(tag[frameid][0], frame)
suite = unittest.TestLoader().loadTestsFromTestCase(FriendlyTestCase)
if __name__ == "__main__":
warnings.simplefilter("always", Warning)
unittest.main(defaultTest="suite")
```
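As a quick illustration of the friendly-name layer these tests exercise, the same attributes can be driven interactively. The sketch below reuses the `Tag24`, `TSOT` and `TRCK` names imported at the top of this test module, and the expected output follows the assertions above.

```python
# Hedged sketch: mirrors the testTextFrames/testTrackFrames behaviour for ID3v2.4.
tag = Tag24()
tag.sort_title = "Foobar"           # stored as a TSOT text frame
tag.track, tag.track_total = 7, 21  # stored together as TRCK "7/21"
print(tag[TSOT])  # expected: TSOT(text=['Foobar'])
print(tag[TRCK])  # expected: TRCK(text=['7/21'])
```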
|
{
"source": "jelmer/slixmpp",
"score": 2
}
|
#### File: plugins/xep_0363/http_upload.py
```python
import logging
import os.path
from aiohttp import ClientSession
from mimetypes import guess_type
from slixmpp import Iq, __version__
from slixmpp.plugins import BasePlugin
from slixmpp.xmlstream import register_stanza_plugin
from slixmpp.xmlstream.handler import Callback
from slixmpp.xmlstream.matcher import StanzaPath
from slixmpp.plugins.xep_0363 import stanza, Request, Slot, Put, Get, Header
log = logging.getLogger(__name__)
class FileUploadError(Exception):
pass
class UploadServiceNotFound(FileUploadError):
pass
class FileTooBig(FileUploadError):
pass
class XEP_0363(BasePlugin):
''' This plugin only supports Python 3.5+ '''
name = 'xep_0363'
description = 'XEP-0363: HTTP File Upload'
dependencies = {'xep_0030', 'xep_0128'}
stanza = stanza
default_config = {
'upload_service': None,
'max_file_size': float('+inf'),
'default_content_type': 'application/octet-stream',
}
def plugin_init(self):
register_stanza_plugin(Iq, Request)
register_stanza_plugin(Iq, Slot)
register_stanza_plugin(Slot, Put)
register_stanza_plugin(Slot, Get)
register_stanza_plugin(Put, Header, iterable=True)
self.xmpp.register_handler(
Callback('HTTP Upload Request',
StanzaPath('iq@type=get/http_upload_request'),
self._handle_request))
def plugin_end(self):
self._http_session.close()
self.xmpp.remove_handler('HTTP Upload Request')
self.xmpp.remove_handler('HTTP Upload Slot')
self.xmpp['xep_0030'].del_feature(feature=Request.namespace)
def session_bind(self, jid):
self.xmpp.plugin['xep_0030'].add_feature(Request.namespace)
def _handle_request(self, iq):
self.xmpp.event('http_upload_request', iq)
async def find_upload_service(self, domain=None, timeout=None):
results = await self.xmpp['xep_0030'].get_info_from_domain(
domain=domain, timeout=timeout)
candidates = []
for info in results:
for identity in info['disco_info']['identities']:
if identity[0] == 'store' and identity[1] == 'file':
candidates.append(info)
for info in candidates:
for feature in info['disco_info']['features']:
if feature == Request.namespace:
return info
def request_slot(self, jid, filename, size, content_type=None, ifrom=None,
timeout=None, callback=None, timeout_callback=None):
iq = self.xmpp.Iq()
iq['to'] = jid
iq['from'] = ifrom
iq['type'] = 'get'
request = iq['http_upload_request']
request['filename'] = filename
request['size'] = str(size)
request['content-type'] = content_type or self.default_content_type
return iq.send(timeout=timeout, callback=callback,
timeout_callback=timeout_callback)
async def upload_file(self, filename, size=None, content_type=None, *,
input_file=None, ifrom=None, domain=None, timeout=None,
callback=None, timeout_callback=None):
''' Helper function which does all of the uploading process. '''
if self.upload_service is None:
info_iq = await self.find_upload_service(
domain=domain, timeout=timeout)
if info_iq is None:
raise UploadServiceNotFound()
self.upload_service = info_iq['from']
for form in info_iq['disco_info'].iterables:
values = form['values']
if values['FORM_TYPE'] == ['urn:xmpp:http:upload:0']:
try:
self.max_file_size = int(values['max-file-size'])
except (TypeError, ValueError):
log.error('Invalid max size received from HTTP File Upload service')
self.max_file_size = float('+inf')
break
if input_file is None:
input_file = open(filename, 'rb')
if size is None:
size = input_file.seek(0, 2)
input_file.seek(0)
if size > self.max_file_size:
raise FileTooBig()
if content_type is None:
content_type = guess_type(filename)[0]
if content_type is None:
content_type = self.default_content_type
basename = os.path.basename(filename)
slot_iq = await self.request_slot(self.upload_service, basename, size,
content_type, ifrom, timeout,
timeout_callback=timeout_callback)
slot = slot_iq['http_upload_slot']
headers = {
'Content-Length': str(size),
'Content-Type': content_type or self.default_content_type,
**{header['name']: header['value'] for header in slot['put']['headers']}
}
# Do the actual upload here.
async with ClientSession(headers={'User-Agent': 'slixmpp ' + __version__}) as session:
response = await session.put(
slot['put']['url'],
data=input_file,
headers=headers,
timeout=timeout)
log.info('Response code: %d (%s)', response.status, await response.text())
response.close()
return slot['get']['url']
```
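For context, a minimal client wiring for this plugin might look like the sketch below; the JID, password and file path are placeholders, and it assumes the server (or a component it discovers) advertises XEP-0363.

```python
# Hedged usage sketch for the plugin above, not part of the slixmpp sources.
import slixmpp

class UploadClient(slixmpp.ClientXMPP):
    def __init__(self, jid, password, path):
        super().__init__(jid, password)
        self.path = path
        self.register_plugin('xep_0363')  # HTTP File Upload
        self.add_event_handler('session_start', self.start)

    async def start(self, event):
        self.send_presence()
        await self.get_roster()
        # upload_file() discovers the service, requests a slot and PUTs the file
        url = await self['xep_0363'].upload_file(self.path)
        print('Uploaded to:', url)
        self.disconnect()

if __name__ == '__main__':
    client = UploadClient('user@example.com', 'secret', 'photo.jpg')
    client.connect()
    client.process(forever=False)
```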
|
{
"source": "jelmer/splitwise",
"score": 3
}
|
#### File: splitwise/splitwise/category.py
```python
class Category(object):
def __init__(self,data=None):
self.id = data["id"]
self.name = data["name"]
self.subcategories = []
if "subcategories" in data:
for sub in data["subcategories"]:
self.subcategories.append(Category(sub))
def getId(self):
return self.id
def getName(self):
return self.name
def getSubcategories(self):
return self.subcategories
```
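A minimal sketch of how this wrapper is meant to be fed, using made-up data shaped like the Splitwise API's category payload:

```python
data = {
    "id": 1,
    "name": "Utilities",
    "subcategories": [
        {"id": 10, "name": "Electricity"},
        {"id": 11, "name": "Water"},
    ],
}

category = Category(data)
print(category.getId(), category.getName())                    # 1 Utilities
print([sub.getName() for sub in category.getSubcategories()])  # ['Electricity', 'Water']
```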
|
{
"source": "jelmer/testscenarios",
"score": 3
}
|
#### File: testscenarios/tests/test_scenarios.py
```python
import unittest
import testtools
from testtools.matchers import EndsWith
from testtools.tests.helpers import LoggingResult
import testscenarios
from testscenarios.scenarios import (
apply_scenario,
apply_scenarios,
generate_scenarios,
load_tests_apply_scenarios,
multiply_scenarios,
)
class TestGenerateScenarios(testtools.TestCase):
def hook_apply_scenarios(self):
self.addCleanup(setattr, testscenarios.scenarios, 'apply_scenarios',
apply_scenarios)
log = []
def capture(scenarios, test):
log.append((scenarios, test))
return apply_scenarios(scenarios, test)
testscenarios.scenarios.apply_scenarios = capture
return log
def test_generate_scenarios_preserves_normal_test(self):
class ReferenceTest(unittest.TestCase):
def test_pass(self):
pass
test = ReferenceTest("test_pass")
log = self.hook_apply_scenarios()
self.assertEqual([test], list(generate_scenarios(test)))
self.assertEqual([], log)
def test_tests_with_scenarios_calls_apply_scenarios(self):
class ReferenceTest(unittest.TestCase):
scenarios = [('demo', {})]
def test_pass(self):
pass
test = ReferenceTest("test_pass")
log = self.hook_apply_scenarios()
tests = list(generate_scenarios(test))
self.expectThat(
tests[0].id(), EndsWith('ReferenceTest.test_pass(demo)'))
self.assertEqual([([('demo', {})], test)], log)
def test_all_scenarios_yielded(self):
class ReferenceTest(unittest.TestCase):
scenarios = [('1', {}), ('2', {})]
def test_pass(self):
pass
test = ReferenceTest("test_pass")
tests = list(generate_scenarios(test))
self.expectThat(
tests[0].id(), EndsWith('ReferenceTest.test_pass(1)'))
self.expectThat(
tests[1].id(), EndsWith('ReferenceTest.test_pass(2)'))
def test_scenarios_attribute_cleared(self):
class ReferenceTest(unittest.TestCase):
scenarios = [
('1', {'foo': 1, 'bar': 2}),
('2', {'foo': 2, 'bar': 4})]
def test_check_foo(self):
pass
test = ReferenceTest("test_check_foo")
tests = list(generate_scenarios(test))
for adapted in tests:
self.assertEqual(None, adapted.scenarios)
def test_multiple_tests(self):
class Reference1(unittest.TestCase):
scenarios = [('1', {}), ('2', {})]
def test_something(self):
pass
class Reference2(unittest.TestCase):
scenarios = [('3', {}), ('4', {})]
def test_something(self):
pass
suite = unittest.TestSuite()
suite.addTest(Reference1("test_something"))
suite.addTest(Reference2("test_something"))
tests = list(generate_scenarios(suite))
self.assertEqual(4, len(tests))
class TestApplyScenario(testtools.TestCase):
def setUp(self):
super(TestApplyScenario, self).setUp()
self.scenario_name = 'demo'
self.scenario_attrs = {'foo': 'bar'}
self.scenario = (self.scenario_name, self.scenario_attrs)
class ReferenceTest(unittest.TestCase):
def test_pass(self):
pass
def test_pass_with_docstring(self):
""" The test that always passes.
This test case has a PEP 257 conformant docstring,
with its first line being a brief synopsis and the
rest of the docstring explaining that this test
does nothing but pass unconditionally.
"""
pass
self.ReferenceTest = ReferenceTest
def test_sets_specified_id(self):
raw_test = self.ReferenceTest('test_pass')
raw_id = "ReferenceTest.test_pass"
scenario_name = self.scenario_name
expect_id = "%(raw_id)s(%(scenario_name)s)" % vars()
modified_test = apply_scenario(self.scenario, raw_test)
self.expectThat(modified_test.id(), EndsWith(expect_id))
def test_sets_specified_attributes(self):
raw_test = self.ReferenceTest('test_pass')
modified_test = apply_scenario(self.scenario, raw_test)
self.assertEqual('bar', modified_test.foo)
def test_appends_scenario_name_to_short_description(self):
raw_test = self.ReferenceTest('test_pass_with_docstring')
modified_test = apply_scenario(self.scenario, raw_test)
raw_doc = self.ReferenceTest.test_pass_with_docstring.__doc__
raw_desc = raw_doc.split("\n")[0].strip()
scenario_name = self.scenario_name
expect_desc = "%(raw_desc)s (%(scenario_name)s)" % vars()
self.assertEqual(expect_desc, modified_test.shortDescription())
class TestApplyScenarios(testtools.TestCase):
def test_calls_apply_scenario(self):
self.addCleanup(setattr, testscenarios.scenarios, 'apply_scenario',
apply_scenario)
log = []
def capture(scenario, test):
log.append((scenario, test))
testscenarios.scenarios.apply_scenario = capture
scenarios = ["foo", "bar"]
result = list(apply_scenarios(scenarios, "test"))
self.assertEqual([('foo', 'test'), ('bar', 'test')], log)
def test_preserves_scenarios_attribute(self):
class ReferenceTest(unittest.TestCase):
scenarios = [('demo', {})]
def test_pass(self):
pass
test = ReferenceTest("test_pass")
tests = list(apply_scenarios(ReferenceTest.scenarios, test))
self.assertEqual([('demo', {})], ReferenceTest.scenarios)
self.assertEqual(ReferenceTest.scenarios, tests[0].scenarios)
class TestLoadTests(testtools.TestCase):
class SampleTest(unittest.TestCase):
def test_nothing(self):
pass
scenarios = [
('a', {}),
('b', {}),
]
def test_load_tests_apply_scenarios(self):
suite = load_tests_apply_scenarios(
unittest.TestLoader(),
[self.SampleTest('test_nothing')],
None)
result_tests = list(testtools.iterate_tests(suite))
self.assertEquals(
2,
len(result_tests),
result_tests)
def test_load_tests_apply_scenarios_old_style(self):
"""Call load_tests in the way used by bzr."""
suite = load_tests_apply_scenarios(
[self.SampleTest('test_nothing')],
self.__class__.__module__,
unittest.TestLoader(),
)
result_tests = list(testtools.iterate_tests(suite))
self.assertEquals(
2,
len(result_tests),
result_tests)
class TestMultiplyScenarios(testtools.TestCase):
def test_multiply_scenarios(self):
def factory(name):
for i in 'ab':
yield i, {name: i}
scenarios = multiply_scenarios(factory('p'), factory('q'))
self.assertEqual([
('a,a', dict(p='a', q='a')),
('a,b', dict(p='a', q='b')),
('b,a', dict(p='b', q='a')),
('b,b', dict(p='b', q='b')),
],
scenarios)
def test_multiply_many_scenarios(self):
def factory(name):
for i in 'abc':
yield i, {name: i}
scenarios = multiply_scenarios(factory('p'), factory('q'),
factory('r'), factory('t'))
self.assertEqual(
3**4,
len(scenarios),
scenarios)
self.assertEqual(
'a,a,a,a',
scenarios[0][0])
class TestPerModuleScenarios(testtools.TestCase):
def test_per_module_scenarios(self):
"""Generate scenarios for available modules"""
s = testscenarios.scenarios.per_module_scenarios(
'the_module', [
('Python', 'testscenarios'),
('unittest', 'unittest'),
('nonexistent', 'nonexistent'),
])
self.assertEqual('nonexistent', s[-1][0])
self.assertIsInstance(s[-1][1]['the_module'], tuple)
s[-1][1]['the_module'] = None
self.assertEqual(s, [
('Python', {'the_module': testscenarios}),
('unittest', {'the_module': unittest}),
('nonexistent', {'the_module': None}),
])
```
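For reference, the feature these tests cover is normally consumed like the sketch below; the class and scenario names are invented for illustration.

```python
import unittest
from testscenarios import TestWithScenarios

class TestIntParsing(TestWithScenarios):
    # Each scenario's attributes are injected into a copy of every test method.
    scenarios = [
        ('decimal', {'text': '42', 'expected': 42}),
        ('hex', {'text': '0x2a', 'expected': 42}),
    ]

    def test_parse(self):
        self.assertEqual(self.expected, int(self.text, 0))

if __name__ == '__main__':
    unittest.main()
```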
|
{
"source": "JelmerT/particle-timezone",
"score": 3
}
|
#### File: particle-timezone/timezone-server/main.py
```python
import logging
import json
import time
import googlemaps
from flask import Flask, request, make_response
import config
EVENT_NAME = "timezone"
logging.basicConfig(format='[%(asctime)s] [%(levelname)s] %(name)s %(message)s',
level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
app = Flask(__name__)
def get_timezone(json_data):
wifiAccessPoints = []
# look for access points in json
for access_point in json_data['data']['a']:
ap = dict(macAddress=access_point['m'],
signalStrength=access_point['s'],
channel= access_point['c'])
wifiAccessPoints.append(ap)
# app.logger.info(ap)
gmaps = googlemaps.Client(key=config.maps_api_key)
location = gmaps.geolocate(consider_ip=False,
wifi_access_points=wifiAccessPoints)
# app.logger.info('%s', json.dumps(location))
timezone = gmaps.timezone(location=location['location'])
app.logger.info('%s', json.dumps(timezone))
#TODO check if response is an OK
json_tz_response = dict(rawOffset=timezone['rawOffset'],
dstOffset=(timezone['dstOffset']))
return json_tz_response
@app.route('/')
def root():
return 'Timezone server is running!'
@app.route('/v1/timezone', methods=['POST'])
def post_timezone():
# check for correct content-type (json)
if request.is_json:
json_data = request.get_json()
app.logger.info('%s', json.dumps(json_data))
try:
# Check for particle test-event and reply 200
if json_data['data'] == "test-event":
return make_response(json.dumps(json_data), 200)
elif json_data['event'].endswith(EVENT_NAME):
json_tz_response = get_timezone(json_data)
return make_response(json.dumps(json_tz_response),200)
else:
return make_response("Malformed request", 400)
except KeyError:
return make_response("Malformed request", 400)
return make_response("Unsupported Content-Type", 415)
@app.errorhandler(500)
def server_error(e):
app.logger.exception('An error occurred during a request.')
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
@app.errorhandler(404)
def not_found_error(error):
return make_response("Page not found", 404)
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
```
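A rough way to exercise the endpoint locally is Flask's test client; the access-point data below is fabricated, and a valid Google Maps API key in `config` is assumed for the geolocation call to succeed.

```python
import json
from main import app

payload = {
    "event": "timezone",
    "data": {
        "a": [
            {"m": "00:11:22:33:44:55", "s": -45, "c": 6},
            {"m": "66:77:88:99:aa:bb", "s": -60, "c": 11},
        ]
    },
}

with app.test_client() as client:
    response = client.post("/v1/timezone",
                           data=json.dumps(payload),
                           content_type="application/json")
    print(response.status_code, response.get_data(as_text=True))
```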
|
{
"source": "je-l/multi-pack",
"score": 3
}
|
#### File: multi-pack/multipack/bwt.py
```python
from multipack.sorting import counting_sorted, merge_sort
CHUNK_SIZE = 10000
def bwt_encode(stream):
"""Rearrange a string to more easily compressable string.
:param stream: input stream for the transform.
:return: generator for bwt-rearranged string.
"""
chunk = stream.read(CHUNK_SIZE)
if not chunk:
yield b"\x03\x02"
while chunk:
chunk = b"\x02" + chunk + b"\x03"
table = _create_table(chunk)
for rotation in table:
yield rotation[-1:]
chunk = stream.read(CHUNK_SIZE)
def _create_table(string):
"""Create the table of different rotations.
:param string: base for the different rotations.
    :return: Sorted list of rotations. There are len(string) different rotations
    altogether.
"""
table = [None] * len(string)
for index, _ in enumerate(string):
start = string[index:]
end = string[:index]
table[-index] = (start + end)
merge_sort(table)
return table
def bwt_decode(enc_input):
"""Decode bwt-rearranged byte generator.
:param enc_input: byte generator of encoded BWT data.
:return: byte generator of decoded data. The yielded chunks are very large.
"""
input_chunk = _read_chunk(enc_input)
while input_chunk:
input_length = len(input_chunk)
byte_start, indices = _create_indices(input_chunk)
local_index = input_chunk.index(b"\x03")
output = [b""] * input_length
for i in range(input_length):
next_byte = input_chunk[local_index]
output[input_length - i - 1] = next_byte
local_index = byte_start[next_byte] + indices[local_index]
yield bytes(output).rstrip(b"\x03").strip(b"\x02")
input_chunk = _read_chunk(enc_input)
def _read_chunk(source):
"""Read chunk of data from generator and return it.
:param source: source generator for the bytes.
:return: chunk of bwt encoded data.
"""
next_bytes = b""
for byte in range(CHUNK_SIZE + 2):
try:
next_bytes += next(source)
except StopIteration:
break
return next_bytes
def _create_indices(bwt_input):
"""Generate indices helper list for BWT uncompression.
:param bwt_input: byte string input.
:return: indice lists for the uncompression.
"""
input_length = len(bwt_input)
byte_start = [None] * 256
indices = [None] * input_length
first_column = counting_sorted(bwt_input, 256)
count = [0] * 256
for byte in range(input_length):
index = bwt_input[byte]
indices[byte] = count[index]
count[index] += 1
index = first_column[byte]
if byte_start[index] is None:
byte_start[index] = byte
return byte_start, indices
def _find_decoded(table):
"""Look for row which ends to "end of text" (ETX) control character.
:param table: table of strings, where one should end with ETX control
character.
:return: decoded string.
"""
for row in table:
if row.endswith(b"\x03"):
return row
raise Exception("No ETX character-ending row in table.")
def rle_encode(byte_arr):
"""Use run length encoding on a byte string.
:param byte_arr: byte generator.
:return: byte string of encoded data.
"""
output = b""
streak = 1
try:
prev = next(byte_arr)
except StopIteration:
return b""
while True:
try:
char = next(byte_arr)
except StopIteration:
break
if char == prev and streak < 255:
streak += 1
else:
output += prev + bytes([streak])
streak = 1
prev = char
output += prev + bytes([streak])
return output
def rle_decode(stream):
"""Decode run-length encoded byte stream.
    :param stream: byte stream of run-length encoded data.
:return: byte-generator, which returns one byte at a time.
"""
while True:
byte = stream.read(1)
if not byte:
break
count = int.from_bytes(stream.read(1), byteorder="little")
for i in range(count):
yield byte
```
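A hypothetical round trip through the pipeline above (BWT, then RLE, then back) could look like this; `BytesIO` stands in for the file streams, and it assumes the code is run from the multi-pack source tree so `multipack.sorting` resolves.

```python
from io import BytesIO
from multipack.bwt import bwt_encode, bwt_decode, rle_encode, rle_decode

original = b"banana banana banana"
compressed = rle_encode(bwt_encode(BytesIO(original)))
restored = b"".join(bwt_decode(rle_decode(BytesIO(compressed))))
print(restored == original)  # expected True if the transform round-trips
```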
#### File: multi-pack/test/test_datastructures.py
```python
import unittest
from multipack.datastructures import Node, LinkedList, HashTable, DynamicArray
class TestDatastructures(unittest.TestCase):
def test_node_creation(self):
node = Node(1, 2)
self.assertEqual(2, node.data)
def test_node_comparison_1(self):
node_1 = Node(1, 1)
node_2 = Node(2, 2)
node_3 = Node(3, 3)
node_4 = Node(4, 4)
node_5 = Node(5, 5)
node_5_2 = Node(5, 5)
self.assertTrue(node_1 < node_2)
self.assertEqual(node_5, node_5_2)
self.assertTrue(node_4 > node_3)
def test_linked_list_creation(self):
linked_list = LinkedList()
self.assertIsNone(linked_list.first)
def test_linked_list_(self):
linked_list = LinkedList()
self.assertIsNone(linked_list.first)
def test_linked_list_add_last(self):
linked_list = LinkedList()
linked_list.add_last("k", 1)
linked_list.add_last("o", 2)
self.assertEqual(1, linked_list.first.data)
def test_linked_list_small(self):
linked_list = LinkedList()
for i in range(1, 5):
linked_list.add_first(i, i)
total = 0
node = linked_list.first
while node is not None:
total += node.data
node = node.next
self.assertEqual(10, total)
def test_hash_table_length_same_keys(self):
hash_table = HashTable()
for i in range(3):
hash_table["nice"] = 7
self.assertEqual(1, len(hash_table))
def test_hash_table_one_key_length(self):
hash_table = HashTable()
hash_table[2] = "k"
self.assertEqual(1, len(hash_table))
def test_hash_table_full_length(self):
hash_table = HashTable()
for i in range(4096):
hash_table[i] = i
self.assertEqual(4096, len(hash_table))
def test_hash_table_same_key_value_does_change(self):
hash_table = HashTable()
hash_table["ab"] = 3
hash_table["ab"] = 33
self.assertEqual(33, hash_table["ab"])
def test_missing_key_raises_key_error(self):
hash_table = HashTable()
with self.assertRaises(KeyError):
k = hash_table["k"]
def test_hash_table_contains(self):
hash_table = HashTable()
hash_table["a"] = 2
self.assertTrue("a" in hash_table)
def test_hash_table_contains_missing(self):
hash_table = HashTable()
hash_table["a"] = 2
self.assertFalse("b" in hash_table)
def test_hash_table_contains_complex(self):
hash_table = HashTable()
emoji_string = "$affs🐲d"
hash_table[emoji_string] = -1
self.assertTrue(emoji_string in hash_table)
def test_dyn_array_short(self):
dyn_arr = DynamicArray()
dyn_arr.append(2)
self.assertEqual(2, dyn_arr[0])
def test_dyn_array_medium(self):
dyn_arr = DynamicArray()
for i in range(30):
dyn_arr.append(i)
self.assertEqual(29, dyn_arr[29])
def test_dyn_array_len(self):
dyn_arr = DynamicArray()
self.assertEqual(0, len(dyn_arr))
def test_dyn_array_len_2(self):
dyn_arr = DynamicArray()
for i in range(33):
dyn_arr.append("k")
self.assertEqual(33, len(dyn_arr))
```
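For orientation, the same API surface the tests above exercise, condensed into one interactive sketch:

```python
from multipack.datastructures import LinkedList, HashTable, DynamicArray

table = HashTable()
table["answer"] = 42
numbers = DynamicArray()
for n in range(3):
    numbers.append(n)
chain = LinkedList()
chain.add_first("head", 1)
print(table["answer"], len(numbers), chain.first.data)  # 42 3 1
```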
|
{
"source": "JelNiSlaw/Menel",
"score": 2
}
|
#### File: Menel/cogs/images.py
```python
import asyncio
import imghdr
import re
import textwrap
from io import BytesIO
from math import sqrt
from os import environ
from time import perf_counter
from typing import Literal, Optional
import discord
import httpx
from discord.ext import commands
from PIL import Image, ImageDraw, ImageFont
from .. import PATH
from ..bot import Menel
from ..utils import imperial
from ..utils.checks import has_attachments
from ..utils.context import Context
ASCII_IMG_SIZE = 128
ASCII_STYLES = {
"blocks": "█▓▒░ ",
"standard": "$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\"^`'. ",
"minimal": "@%+*:=-. ",
}
ONEPAGER_MAX_TEXT_LENGTH = 512 * 1024
ONEPAGER_MARGIN = 64
ONEPAGER_FONT = ImageFont.truetype(str(PATH / "resources" / "Roboto-Light.ttf"), size=20)
def image_to_ascii(image: Image, charset: str, invert: bool) -> str:
if image.width >= image.height:
size = ASCII_IMG_SIZE, round((image.height / image.width) * (ASCII_IMG_SIZE // 2))
else:
size = round((image.width / image.height) * (ASCII_IMG_SIZE * 2)), ASCII_IMG_SIZE
image = image.resize(size, Image.LANCZOS)
if image.mode != "L":
if not invert:
white = Image.new("RGB", image.size, color=0xFFFFFF)
white.paste(image, mask=image)
image = image.convert("L", dither=Image.NONE)
if invert:
charset = charset[::-1]
ascii_image = ""
imagedata = list(image.getdata())
for i in range(0, image.width * image.height - 1, image.width):
row = (charset[round(pixel / 255 * (len(charset) - 1))] for pixel in imagedata[i : i + image.width])
ascii_image += "".join(row).rstrip() + "\n"
return ascii_image
def prepare_text(text: str) -> str:
text = re.sub(r"\s+", " ", text.strip())
return "\n".join(
textwrap.wrap(
text,
width=round(sqrt(len(text)) * 1.25),
expand_tabs=False,
replace_whitespace=True,
drop_whitespace=True,
break_on_hyphens=False,
)
)
def render_page(text: str) -> BytesIO:
size = ONEPAGER_FONT.getsize_multiline(text)
image = Image.new("L", (size[0] + 2 * ONEPAGER_MARGIN, size[1] + 2 * ONEPAGER_MARGIN), 0xFFFFFF)
draw = ImageDraw.Draw(image)
draw.multiline_text((ONEPAGER_MARGIN, ONEPAGER_MARGIN), text, fill=0, font=ONEPAGER_FONT, align="center")
file = BytesIO()
image.save(file, format="png", optimize=True)
file.seek(0)
return file
class Images(commands.Cog):
@commands.command(aliases=["ascii-art", "ascii"])
@has_attachments(1, ("image/",))
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.user)
@commands.max_concurrency(3)
async def asciiart(
self,
ctx: Context,
style: Optional[Literal["blocks", "standard", "minimal"]] = "blocks",
invert: Literal["invert", "inv", "inverted"] = False,
):
"""
Generuje ASCII art z załączonego zdjęcia
`style`: zestaw znaków
`invert`: zamiana ciemnych znaków z jasnymi
"""
try:
image = Image.open(BytesIO(await ctx.message.attachments[0].read()))
except discord.HTTPException:
await ctx.error("Nie udało się pobrać załączonego pliku")
return
if image.width < 64 or image.height < 64:
await ctx.error("Ten obraz jest za mały")
return
image = await asyncio.to_thread(image_to_ascii, image, ASCII_STYLES[style], invert is not None)
document = await imperial.create_document(image, short_urls=True, expiration=14)
await ctx.send(document.raw_link)
@commands.command(aliases=["burning"])
async def cooltext(self, ctx: Context, *, text: str):
"""Generuje palący się tekst na stronie cooltext.com"""
async with ctx.channel.typing():
r = await ctx.client.post(
"https://cooltext.com/PostChange",
data={
"LogoID": 4,
"Text": text,
"FontSize": 70,
"Color1_color": "#FF0000",
"Integer1": 15,
"Boolean1": "on",
"Integer13": "on",
"Integer12": "on",
"BackgroundColor_color": "#000000",
},
)
async with httpx.AsyncClient(verify=False) as client:
r = await client.get(r.json()["renderLocation"])
await ctx.send(file=discord.File(BytesIO(r.read()), "burning.gif"))
@commands.command(aliases=["jesus", "jestsus"])
@commands.cooldown(2, 10, commands.BucketType.user)
async def jezus(self, ctx: Context):
"""Wysyła losowe zdjęcie Jezusa"""
async with ctx.channel.typing():
r = await ctx.client.get(
"https://obrazium.com/v1/jesus", headers={"Authorization": environ["OBRAZIUM_TOKEN"]}
)
if r.status_code == 200:
file = BytesIO(r.read())
ext = imghdr.what(file) or "jpeg"
await ctx.send(file=discord.File(file, filename="jezus." + ext))
else:
await ctx.error("Nie działa")
@commands.command()
@has_attachments(1, ("text/",))
@commands.cooldown(2, 10, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.user)
@commands.max_concurrency(2)
async def onepager(self, ctx: Context):
"""Renderuje cały załączony plik tesktowy na jednej stronie"""
attachment = ctx.message.attachments[0]
if not attachment.content_type or not attachment.content_type.startswith("text/"):
await ctx.error("Załącz plik tekstowy")
return
text = prepare_text((await attachment.read()).decode("utf8"))
if len(text) > ONEPAGER_MAX_TEXT_LENGTH:
await ctx.error(f"Maksymalna długość tekstu to {ONEPAGER_MAX_TEXT_LENGTH} znaków")
return
async with ctx.channel.typing():
start = perf_counter()
image = await asyncio.to_thread(render_page, text)
end = perf_counter()
await ctx.send(
f"Wyrenderowano w czasie {round(end - start, 1)}s",
file=discord.File(image, attachment.filename.rsplit(".", 1)[0] + ".png"),
)
@commands.command(aliases=["this-person-does-not-exist", "thispersondoesnotexist", "person"])
@commands.cooldown(2, 5, commands.BucketType.user)
async def tpdne(self, ctx: Context):
"""Pobiera wygenerowaną twarz z thispersondoesnotexist.com"""
async with ctx.channel.typing():
r = await ctx.client.get("https://thispersondoesnotexist.com/image")
await ctx.send(file=discord.File(BytesIO(r.read()), filename="person.jpeg"))
def setup(bot: Menel):
bot.add_cog(Images())
```
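As a rough illustration, the `image_to_ascii` helper above can be exercised on its own with Pillow, outside the Discord command flow; the file name is a placeholder and the module's imports (discord, httpx, the bot package) are assumed to resolve.

```python
from PIL import Image

with Image.open("cover.jpg") as img:
    art = image_to_ascii(img, ASCII_STYLES["minimal"], invert=False)
print(art)
```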
#### File: Menel/cogs/utilities.py
```python
import asyncio
import http.client
import os
import re
import unicodedata
from io import BytesIO
from math import floor
from pathlib import Path
from typing import Literal, Optional
from urllib import parse
import aiohttp
import dateutil.parser
import discord
import httpx
import pyppeteer
import pyppeteer.errors
import unidecode
import youtube_dl
from discord.ext import commands
from jishaku.codeblocks import codeblock_converter
from .. import PATH
from ..bot import Menel
from ..resources import filesizes
from ..resources.languages import LANGUAGES
from ..utils import embeds, imgur, markdown
from ..utils.checks import has_attachments
from ..utils.context import Context
from ..utils.converters import URL, LanguageConverter
from ..utils.errors import SendError
from ..utils.misc import get_image_url_from_message_or_reply
from ..utils.text_tools import escape, escape_str, limit_length, plural
AUTO = "auto"
class YouTubeDownloader:
def __init__(self, *, only_audio: bool = False):
self.status = {}
self.OPTIONS = {
"format": "best",
"outtmpl": str(PATH / "temp" / (os.urandom(16).hex() + ".%(ext)s")),
"merge_output_format": "mp4",
"default_search": "auto",
"progress_hooks": [self._hook],
"max_downloads": 1,
"ignore_config": True,
"no_playlist": True,
"no_mark_watched": True,
"geo_bypass": True,
"no_color": True,
"abort_on_error": True,
"abort_on_unavailable_fragment": True,
"no_overwrites": True,
"no_continue": True,
"quiet": True,
}
if only_audio:
self.OPTIONS.update(format="bestaudio/best", extract_audio=True)
self.ydl = youtube_dl.YoutubeDL(self.OPTIONS)
async def download(self, video: str) -> None:
self.status.clear()
await asyncio.to_thread(self.ydl.extract_info, video)
async def extract_info(self, video: str) -> dict:
return await asyncio.to_thread(self.ydl.extract_info, video, download=False)
def _hook(self, info: dict) -> None:
self.status = info
async def progress_message(self, m: Context):
msg = await m.send("Downloading…")
for _ in range(20):
if self.status:
break
await asyncio.sleep(0.5)
while self.status and self.status["status"] == "downloading":
ratio = self.status["downloaded_bytes"] / self.status["total_bytes"]
progress = ("\N{FULL BLOCK}" * floor(ratio * 20)).ljust(20, "\N{LIGHT SHADE}")
await msg.edit(
content=f"{progress} {ratio:.1%} "
f"{self.status['_speed_str'].strip()} Pozostało {self.status['_eta_str'].strip()}"
)
await asyncio.sleep(1.5)
await msg.delete()
class Utilities(commands.Cog):
@commands.command(aliases=["trans", "tr"])
@commands.cooldown(2, 5, commands.BucketType.user)
async def translate(
self,
ctx: Context,
lang1: LanguageConverter = "en",
lang2: Optional[LanguageConverter] = None,
*,
text: str = None,
):
"""
        Tłumaczy tekst Tłumaczem Google
`lang1`: język docelowy, lub źródłowy jeśli podany jest argument `lang2`
`lang2`: język docelowy jeśli podany jest argument `lang1`
`text`: tekst do przetłumaczenia
"""
if lang2 is not None:
src = lang1
dest = lang2
else:
src = AUTO
dest = lang1
if text is None and (ref := ctx.message.reference):
msg = ref.resolved or await ctx.bot.fetch_message(ref.channel_id, ref.message_id)
text = msg.content
if text is None:
raise SendError("Podaj tekst do przetłumaczenia lub odpowiedz na wiadomość")
async with ctx.typing():
r = await ctx.bot.client.get(
"https://translate.googleapis.com/translate_a/single",
params={
"sl": src, # source language
"tl": dest, # translation language
"q": text, # query
"client": "gtx", # Google Translate Extension
"dj": 1, # what?
"dt": "t", # ok.
},
)
json = r.json()
if "sentences" not in json:
await ctx.error("Tłumacz Google nie zwrócił tłumaczenia")
return
if src == AUTO:
src = json.get("src", AUTO)
embed = embeds.with_author(ctx.author)
embed.title = LANGUAGES.get(src, src).title() + " ➜ " + LANGUAGES.get(dest, dest).title()
embed.description = limit_length(
escape(" ".join(s["trans"] for s in json["sentences"])), max_length=4096, max_lines=32
)
await ctx.send(embed=embed)
@commands.command(aliases=["urban-dictionary", "urban", "ud"])
async def urbandictionary(self, ctx: Context, *, query: str):
"""Wyszukuje podaną frazę w słowniku Urban Dictionary"""
async with ctx.typing():
r = await ctx.client.head(
"https://www.urbandictionary.com/define.php", params={"term": query}, allow_redirects=False
)
if r.status_code == 302:
url = httpx.URL(r.headers["Location"])
query = url.params["term"]
elif r.status_code != 200:
await ctx.error("Nie znalazłem tej frazy w Urban Dictionary.")
return
r = await ctx.client.get("https://api.urbandictionary.com/v0/define", params={"term": query})
json = r.json()
if "error" in json:
await ctx.error(f'Urban Dictionary zwróciło błąd:\n{json["error"]}')
return
data = json["list"][0]
def remove_brackets(text: str) -> str:
return re.sub(r"\[(?P<word>.*?)]", r"\g<word>", text, re.DOTALL)
embed = discord.Embed(
title=limit_length(data["word"], max_length=256),
url=data["permalink"],
description=escape(limit_length(remove_brackets(data["definition"]), max_length=2048, max_lines=16)),
color=discord.Color.green(),
)
if data["example"]:
embed.add_field(
name="Example",
value=limit_length(escape(remove_brackets(data["example"])), max_length=1024, max_lines=16),
inline=False,
)
embed.set_footer(text=f"Author: {data['author']}\n👍 {data['thumbs_up']} 👎 {data['thumbs_down']}")
embed.timestamp = dateutil.parser.parse(data["written_on"])
await ctx.send(embed=embed)
@commands.command(aliases=["m", "calculate", "calculator", "calc", "kalkulator"])
async def math(self, ctx: Context, *, expression: str):
"""Kalkulator <NAME>"""
async with ctx.channel.typing():
if re.sub(r"\s+", "", expression) == "2+2":
await asyncio.sleep(0.5)
await ctx.send("5")
return
r = await ctx.client.post("https://api.mathjs.org/v4/", json={"expr": expression})
json = r.json()
if json["error"]:
await ctx.error(escape(limit_length(json["error"], max_length=1024, max_lines=4)))
return
await ctx.send(escape(limit_length(json["result"], max_length=2048, max_lines=16)))
@commands.command()
@commands.cooldown(2, 5, commands.BucketType.user)
async def eval(self, ctx: Context, *, code: codeblock_converter):
"""Bezpiecznie wykonuje podany kod w wybranym języku"""
language, code = code
if not language:
await ctx.error("Umieść kod w bloku:\n\\`\\`\\`język\nkod\n\\`\\`\\`")
return
if not code.strip():
await ctx.error("Podaj kod do wykonania.")
return
async with ctx.channel.typing():
async with aiohttp.request(
"POST", "https://emkc.org/api/v1/piston/execute", json={"language": language, "source": code}
) as r:
json = await r.json()
if r.status != 200:
await ctx.error(json.get("message", "Nieznany błąd."))
return
output = [
markdown.codeblock(limit_length(json[out], max_length=512, max_lines=16))
for out in ("stdout", "stderr")
if json[out].strip()
]
embed = discord.Embed(
description=("\n".join(output) if output else "Twój kod nic nie wypisał.")
+ f'\n{json["language"]} {json["version"]}\n'
f"Powered by [Piston](https://github.com/engineer-man/piston)",
color=discord.Color.green() if not json["stderr"].strip() else discord.Color.red(),
)
await ctx.send(embed=embed)
@commands.command(aliases=["charinfo", "utf", "utf8", "utf-8", "u"])
async def unicode(self, ctx: Context, *, chars: str):
"""Pokazuje nazwy znaków standardu Unicode"""
output = []
for c in chars[:16]:
if c == "\u0020": # space
output.append("")
continue
info = f"{escape_str(c)} \N{EM DASH} U+{ord(c):0>4X}"
try:
info += f" \N{EM DASH} {unicodedata.name(c)}"
except ValueError:
pass
output.append(info)
if len(chars) > 16:
output.append("...")
await ctx.send(markdown.codeblock("\n".join(output)))
@commands.command()
async def unidecode(self, ctx: Context, *, text: str):
"""Zamienia znaki Unicode na ASCII używając [unidecode](https://github.com/avian2/unidecode)"""
await ctx.send(escape(limit_length(unidecode.unidecode(text), max_length=1024, max_lines=16), markdown=False))
@commands.command(aliases=["mc", "skin"])
@commands.cooldown(3, 10, commands.BucketType.user)
async def minecraft(self, ctx: Context, *, player: str):
"""Wysyła skin konta Minecraft Java Edition"""
async with ctx.channel.typing():
r = await ctx.client.get(f"https://api.mojang.com/users/profiles/minecraft/{parse.quote(player)}")
if r.status_code == 204:
await ctx.error("Nie znalazłem gracza o tym nicku.")
return
json = r.json()
uuid = json["id"]
requests = [
(f"https://api.mojang.com/user/profiles/{uuid}/names", None),
(f"https://crafatar.com/avatars/{uuid}", {"size": "256", "overlay": None}),
(f"https://crafatar.com/renders/head/{uuid}", {"scale": "6", "overlay": None}),
(f"https://crafatar.com/renders/body/{uuid}", {"scale": "10", "overlay": None}),
]
responses = await asyncio.gather(*(ctx.client.get(url, params=params) for (url, params) in requests))
name_history = responses[0].json()
avatar, head, body = (r.read() for r in responses[1:])
name_history = ", ".join(escape(name["name"]) for name in name_history)
avatar = discord.File(BytesIO(avatar), "avatar.png")
head = discord.File(BytesIO(head), "head.png")
body = discord.File(BytesIO(body), "body.png")
embed = discord.Embed(
description=f"Historia nazw: {name_history}\nUUID: `{uuid}`", color=discord.Color.green()
)
embed.set_author(name=json["name"], icon_url="attachment://head.png")
embed.set_thumbnail(url="attachment://avatar.png")
embed.set_image(url="attachment://body.png")
await ctx.send(embed=embed, files=[avatar, head, body])
@commands.command(aliases=["webshot"])
@commands.cooldown(2, 20, commands.BucketType.user)
@commands.max_concurrency(3, wait=True)
async def webimg(self, ctx: Context, fullpage: Optional[Literal["fullpage", "full"]], *, url: URL):
"""<NAME>ysyła zrzut ekranu strony internetowej"""
async with ctx.typing():
try:
browser = await pyppeteer.launch(
ignoreHTTPSErrors=True, headless=True, args=["--no-sandbox", "--disable-dev-shm-usage"]
)
except http.client.BadStatusLine:
await ctx.error("Nie udało się otworzyć przeglądarki. Spróbuj ponownie.")
return
page = await browser.newPage()
await page.setViewport(
{"width": 2048, "height": 1024, "deviceScaleFactor": 1 if fullpage is not None else 2}
)
try:
await page.goto(url, timeout=30000)
except TimeoutError:
await ctx.error("Minął czas na wczytanie strony.")
except (pyppeteer.errors.PageError, pyppeteer.errors.NetworkError):
await ctx.error("Nie udało się wczytać strony. Sprawdź czy podany adres jest poprawny.")
else:
await asyncio.sleep(2)
try:
screenshot: bytes = await page.screenshot(type="png", fullPage=fullpage is not None, encoding="binary") # type: ignore
except pyppeteer.errors.NetworkError as e:
await ctx.error(str(e))
else:
embed = embeds.with_author(ctx.author)
image = await imgur.upload_image(screenshot)
embed.description = f"Zdjęcie strony: {image}"
if ctx.channel.nsfw:
embed.set_image(url=image)
else:
embed.set_footer(text="Podgląd dostępny jest wyłącznie na kanałach NSFW")
await ctx.send(embed=embed)
finally:
await browser.close()
@commands.command(aliases=["sauce", "souce", "sn"])
@commands.is_nsfw()
@commands.cooldown(3, 20, commands.BucketType.user)
@commands.cooldown(6, 30) # API rate limit
async def saucenao(self, ctx: Context, *, art_url: URL = None):
"""Znajduje źródło obrazka używając saucenao.com API"""
url = art_url or await get_image_url_from_message_or_reply(ctx)
if url is None:
raise SendError("Podaj URL obrazka, załącz plik lub odpowiedz na wiadomość z załącznikiem")
async with ctx.typing():
r = await ctx.client.get(
"https://saucenao.com/search.php",
params={"url": url, "output_type": 2, "numres": 8, "api_key": os.environ["SAUCENAO_KEY"]},
)
json = r.json()
header = json["header"]
if header["status"] != 0:
raise SendError(f'{header["status"]}: {header["message"]}')
minimum_similarity: float = header["minimum_similarity"]
texts = []
for result in json["results"]:
header = result["header"]
data = result["data"]
similarity = float(header["similarity"])
if similarity < minimum_similarity:
continue
if "ext_urls" not in data:
continue
text = [f'**{similarity / 100:.0%}** {escape(header["index_name"])}']
text.extend(data["ext_urls"])
if "source" in data:
text.append(f'Source: {data["source"]}')
texts.append("\n".join(text))
if not texts:
raise SendError("Nie znaleziono źródła podanego obrazka")
await ctx.send(
embed=embeds.with_author(ctx.author, description="\n\n".join(texts)).set_footer(
text="Powered by saucenao.com"
)
)
@commands.command("unshorten-url", aliases=["unshorten", "unshort"])
async def unshorten_url(self, ctx: Context, *, url: URL):
"""Pokazuje przekierowania skróconego linku"""
urls = []
shortened = False
async with ctx.typing():
while True:
r = await ctx.client.head(url, allow_redirects=False)
urls.append(str(r.url))
if "Location" not in r.headers:
break
url = r.headers["Location"]
if len(urls) >= 16 or url in urls:
shortened = True
break
if len(urls) <= 1:
await ctx.error("Ten link nie jest skrócony")
return
if not shortened:
*urls, last = urls
else:
last = None
text = [markdown.code(limit_length(url, max_length=64)) for url in urls]
text.append(limit_length(last, max_length=512) if not shortened else "…")
await ctx.embed("\n".join(text))
@commands.command(aliases=["rtfm"])
@commands.cooldown(3, 10, commands.BucketType.user)
@commands.cooldown(3, 5) # API rate limit
async def docs(self, ctx: Context, *, query: str):
"""Przeszukuje dokumentację biblioteki discord.py (gałęzi master)"""
r = await ctx.client.get(
"https://idevision.net/api/public/rtfm",
params={
"show-labels": True,
"label-labels": False,
"location": "https://discordpy.readthedocs.io/en/master/",
"query": query,
},
)
json = r.json()
nodes = json["nodes"]
if not nodes:
await ctx.error("Nie znaleziono żadnych pasujących wyników")
return
text = [f"[{markdown.code(name)}]({url})" for name, url in nodes.items()]
embed = embeds.with_author(
ctx.author,
title=plural(len(nodes), "wynik", "wyniki", "wyników"),
description="\n".join(text),
color=discord.Color.green(),
)
embed.set_footer(text=f"Czas wyszukiwania: {float(json['query_time']) * 1000:.0f} ms")
await ctx.send(embed=embed)
@commands.command("youtube-dl", aliases=["youtubedl", "yt-dl", "ytdl", "download", "dl"])
@commands.cooldown(2, 20, commands.BucketType.user)
@commands.max_concurrency(2)
async def youtube_dl(self, ctx: Context, audio: Optional[Literal["audio"]], *, video: str):
"""
Pobiera film ze strony
`audio`: pobiera jedynie dźwięk filmu
`video`: link do strony z filmem
"""
await ctx.channel.trigger_typing()
downloader = YouTubeDownloader(only_audio=audio is not None)
progress_message = None
try:
info = await downloader.extract_info(video)
if "_type" in info and info["_type"] == "playlist":
info = info["entries"][0]
duration = info.get("duration")
filesize = info.get("filesize")
if not duration and not filesize:
await ctx.error("Nieznana długość i rozmiar filmu")
return
if duration and duration > 60 * 30:
await ctx.error("Maksymalna długość filmu to 30 minut")
return
if filesize and filesize > 200 * filesizes.MiB:
await ctx.error("Maksymalny rozmiar filmu to 100 MiB")
return
progress_message = asyncio.create_task(downloader.progress_message(ctx))
await downloader.download(info["webpage_url"])
except youtube_dl.utils.YoutubeDLError as e:
if progress_message:
progress_message.cancel()
await ctx.error(escape(limit_length("\n".join(e.args), max_length=1024, max_lines=16)))
return
path = Path(downloader.status["filename"])
try:
async with ctx.channel.typing():
with open(path, "rb") as f:
video = await imgur.upload_video(f.read())
finally:
path.unlink(missing_ok=True)
await ctx.send(video)
@commands.command("imgur")
@has_attachments(allowed_types=("image/",))
@commands.cooldown(2, 10, commands.BucketType.user)
async def _imgur(self, ctx: Context):
"""Przesyła załączone zdjęcia na Imgur"""
async with ctx.typing():
images = [await imgur.upload_image(await a.read()) for a in ctx.message.attachments]
await ctx.send("\n".join(f"<{image}>" for image in images))
def setup(bot: Menel):
bot.add_cog(Utilities())
```
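The `eval` command above ultimately boils down to one HTTP call to the Piston API; a standalone sketch of that same request (no Discord involved) might look like this.

```python
import asyncio
import aiohttp

async def run_snippet():
    async with aiohttp.ClientSession() as session:
        async with session.post(
            "https://emkc.org/api/v1/piston/execute",
            json={"language": "python", "source": "print(2 + 2)"},
        ) as r:
            result = await r.json()
    # The command above reads these same fields from the response.
    print(result.get("stdout"), result.get("stderr"))

asyncio.run(run_snippet())
```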
#### File: Menel/utils/context.py
```python
from __future__ import annotations
import logging
import sys
import traceback
from typing import TYPE_CHECKING, Optional, Union
import discord
import httpx
from discord.ext import commands
from ..utils import embeds
from ..utils.markdown import code
from ..utils.text_tools import escape, location
if TYPE_CHECKING:
from ..bot import Menel
from .database import Database
log = logging.getLogger(__name__)
class Context(commands.Context):
message: discord.Message
guild: Optional[discord.Guild]
author: Union[discord.Member, discord.User]
bot: Menel
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.db: Database = self.bot.db
self.client: httpx.AsyncClient = self.bot.client
self.command_time = self.message.edited_at or self.message.created_at
async def send(
self, *args, channel: discord.abc.Messageable = None, no_reply: bool = False, **kwargs
) -> discord.Message:
if not channel:
channel = self.channel
if "reference" not in kwargs and not no_reply:
kwargs["reference"] = self.message.to_reference(fail_if_not_exists=False)
log.debug(f"Sending a message to {location(self.author, channel, self.guild)}")
return await channel.send(*args, **kwargs)
async def embed(self, content: str, *, embed_kwargs: dict = None, **message_kwargs) -> discord.Message:
return await self.send(
embed=embeds.with_author(
self.author, description=content, color=discord.Color.green(), **(embed_kwargs or {})
),
**message_kwargs,
)
async def error(self, content: str, *, embed_kwargs: dict = None, **message_kwargs) -> discord.Message:
return await self.send(
embed=embeds.with_author(
self.author, description=content, color=discord.Color.red(), **(embed_kwargs or {})
),
**message_kwargs,
)
async def ok_hand(self):
await self.send("\N{OK HAND SIGN}")
async def react_or_send(self, emoji: str):
permissions = self.my_permissions()
if permissions.add_reactions and permissions.read_message_history:
await self.message.add_reaction(emoji)
else:
await self.send(emoji)
async def clean_mentions(self, text: str, /) -> str:
return await commands.clean_content(fix_channel_mentions=True, use_nicknames=False).convert(self, text)
async def report_exception(self, exception: Exception) -> discord.Message:
log.error(exception)
traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)
embed = embeds.with_author(self.author, title="Wystąpił błąd!", color=discord.Color.red())
embed.add_field(name=type(exception).__name__, value=escape(str(exception)), inline=False)
owner = self.guild.get_member(self.bot.owner_id) if self.guild else None
if owner:
text = owner.mention
allowed_mentions = discord.AllowedMentions(users=True)
else:
text = None
allowed_mentions = None
return await self.send(text, embed=embed, allowed_mentions=allowed_mentions)
def my_permissions(self) -> discord.Permissions:
return self.channel.permissions_for(self.me)
def author_permissions(self) -> discord.Permissions:
return self.channel.permissions_for(self.author)
@property
def clean_prefix(self):
if self.prefix in self.bot.prefix_base:
return f"@{self.bot.user.name} "
return self.prefix
async def get_prefixes_str(self, *, join: str = " ") -> str:
prefixes = await self.db.get_prefixes(self.guild)
return join.join([code("@" + self.bot.user.name)] + list(map(code, prefixes)))
```
#### File: Menel/utils/views.py
```python
from typing import Union
import discord
from .markdown import bold
from .text_tools import escape
class Confirm(discord.ui.View):
def __init__(self, user: Union[discord.Member, discord.abc.User]):
super().__init__(timeout=10)
self.user = user
self.result = None
async def interaction_check(self, interaction: discord.Interaction) -> bool:
if interaction.user != self.user:
await interaction.response.send_message(
f"Tylko {bold(escape(str(self.user)))} może używać tych przycisków", ephemeral=True
)
return False
return True
@discord.ui.button(label="Potwierdź", style=discord.ButtonStyle.green)
async def confirm(self, *_) -> None:
self.result = True
self.stop()
@discord.ui.button(label="Anuluj")
async def cancel(self, *_) -> None:
self.result = False
self.stop()
```
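A hypothetical command-side use of the `Confirm` view, with a made-up command name and messages, could look like:

```python
from discord.ext import commands

@commands.command()
async def wyczysc(ctx):
    view = Confirm(ctx.author)
    prompt = await ctx.send("Na pewno?", view=view)
    await view.wait()  # resolves after a button press or the 10 s timeout
    if view.result:
        await prompt.edit(content="Potwierdzono", view=None)
    else:
        await prompt.edit(content="Anulowano lub minął czas", view=None)
```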
|
{
"source": "Jeloi/huely-sublime-plugin",
"score": 3
}
|
#### File: Jeloi/huely-sublime-plugin/huely_palette.py
```python
import sublime, sublime_plugin
import webbrowser
import urllib.parse
import urllib.request
class HuelyExtractCommand(sublime_plugin.TextCommand):
def run(self, edit):
base_url = "http://huely.co"
# base_url = "http://localhost:3000" # for development
url = base_url + "/api/extract"
# get the current view's contents
data = {
# 'text': "#333"
'text': self.view.substr(sublime.Region(0, self.view.size()))
}
# urlencode the data to be sent
data = urllib.parse.urlencode(data)
# encode the text data into bytes data
binary_data = data.encode("utf8")
response = urllib.request.urlopen(url, binary_data)
# decode the response. will be a palette ID if colors were extracted
palette_id = response.read().decode()
# print(palette_id)
if palette_id:
webbrowser.open_new_tab(base_url+"/palette/"+palette_id)
else:
print("No colors extracted waahhh")
```
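The same extract request can be issued outside Sublime Text for quick manual testing; the sample text is arbitrary and the availability of huely.co is assumed.

```python
import urllib.parse
import urllib.request

data = urllib.parse.urlencode({"text": "#333 #ff8800 rgb(12, 34, 56)"}).encode("utf8")
with urllib.request.urlopen("http://huely.co/api/extract", data) as response:
    palette_id = response.read().decode()
print(palette_id or "No colors extracted")
```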
|
{
"source": "JeLoueMonCampingCar/django-waffle",
"score": 2
}
|
#### File: django-waffle/waffle/models.py
```python
from __future__ import unicode_literals
try:
from django.utils import timezone as datetime
except ImportError:
from datetime import datetime
from django.contrib.auth.models import Group
from django.db import models
from django.db.models.signals import post_save, post_delete, m2m_changed
from django.contrib.sites.models import Site
from django.utils.encoding import python_2_unicode_compatible
from waffle.compat import AUTH_USER_MODEL, cache
from waffle.utils import get_setting, keyfmt
@python_2_unicode_compatible
class Flag(models.Model):
"""A feature flag.
Flags are active (or not) on a per-request basis.
"""
name = models.CharField(max_length=100, unique=True,
help_text='The human/computer readable name.')
everyone = models.NullBooleanField(blank=True, help_text=(
'Flip this flag on (Yes) or off (No) for everyone, overriding all '
'other settings. Leave as Unknown to use normally.'))
percent = models.DecimalField(max_digits=3, decimal_places=1, null=True,
blank=True, help_text=(
'A number between 0.0 and 99.9 to indicate a percentage of users for '
'whom this flag will be active.'))
testing = models.BooleanField(default=False, help_text=(
'Allow this flag to be set for a session for user testing.'))
superusers = models.BooleanField(default=True, help_text=(
'Flag always active for superusers?'))
staff = models.BooleanField(default=False, help_text=(
'Flag always active for staff?'))
authenticated = models.BooleanField(default=False, help_text=(
        'Flag always active for authenticated users?'))
languages = models.TextField(blank=True, default='', help_text=(
'Activate this flag for users with one of these languages (comma '
'separated list)'))
groups = models.ManyToManyField(Group, blank=True, help_text=(
'Activate this flag for these user groups.'))
users = models.ManyToManyField(AUTH_USER_MODEL, blank=True, help_text=(
'Activate this flag for these users.'))
rollout = models.BooleanField(default=False, help_text=(
'Activate roll-out mode?'))
note = models.TextField(blank=True, help_text=(
'Note where this Flag is used.'))
created = models.DateTimeField(default=datetime.now, db_index=True,
help_text=('Date when this Flag was created.'))
modified = models.DateTimeField(default=datetime.now, help_text=(
'Date when this Flag was last modified.'))
sites = models.ManyToManyField(Site, default=None, blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.modified = datetime.now()
super(Flag, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Switch(models.Model):
"""A feature switch.
Switches are active, or inactive, globally.
"""
name = models.CharField(max_length=100, unique=True,
help_text='The human/computer readable name.')
active = models.BooleanField(default=False, help_text=(
'Is this flag active?'))
note = models.TextField(blank=True, help_text=(
'Note where this Switch is used.'))
created = models.DateTimeField(default=datetime.now, db_index=True,
help_text=('Date when this Switch was created.'))
modified = models.DateTimeField(default=datetime.now, help_text=(
'Date when this Switch was last modified.'))
sites = models.ManyToManyField(Site, default=None, blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.modified = datetime.now()
super(Switch, self).save(*args, **kwargs)
class Meta:
verbose_name_plural = 'Switches'
@python_2_unicode_compatible
class Sample(models.Model):
"""A sample is true some percentage of the time, but is not connected
to users or requests.
"""
name = models.CharField(max_length=100, unique=True,
help_text='The human/computer readable name.')
percent = models.DecimalField(max_digits=4, decimal_places=1, help_text=(
'A number between 0.0 and 100.0 to indicate a percentage of the time '
'this sample will be active.'))
note = models.TextField(blank=True, help_text=(
'Note where this Sample is used.'))
created = models.DateTimeField(default=datetime.now, db_index=True,
help_text=('Date when this Sample was created.'))
modified = models.DateTimeField(default=datetime.now, help_text=(
'Date when this Sample was last modified.'))
sites = models.ManyToManyField(Site, default=None, blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.modified = datetime.now()
super(Sample, self).save(*args, **kwargs)
def cache_flag(**kwargs):
action = kwargs.get('action', None)
# action is included for m2m_changed signal. Only cache on the post_*.
if not action or action in ['post_add', 'post_remove', 'post_clear']:
f = kwargs.get('instance')
cache.add(keyfmt(get_setting('FLAG_CACHE_KEY'), f.name), f)
cache.add(keyfmt(get_setting('FLAG_USERS_CACHE_KEY'), f.name),
f.users.all())
cache.add(keyfmt(get_setting('FLAG_GROUPS_CACHE_KEY'), f.name),
f.groups.all())
cache.add(keyfmt(get_setting('FLAG_SITES_CACHE_KEY'), f.name),
f.sites.all())
def uncache_flag(**kwargs):
flag = kwargs.get('instance')
data = {
keyfmt(get_setting('FLAG_CACHE_KEY'), flag.name): None,
keyfmt(get_setting('FLAG_USERS_CACHE_KEY'), flag.name): None,
keyfmt(get_setting('FLAG_GROUPS_CACHE_KEY'), flag.name): None,
keyfmt(get_setting('FLAG_SITES_CACHE_KEY'), flag.name): None,
keyfmt(get_setting('ALL_FLAGS_CACHE_KEY')): None
}
cache.set_many(data, 5)
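# Keep cached flag data consistent: a save, delete, or users/groups m2m change
# on a Flag invalidates the related cache entries so the change takes effect
# immediately.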
post_save.connect(uncache_flag, sender=Flag, dispatch_uid='save_flag')
post_delete.connect(uncache_flag, sender=Flag, dispatch_uid='delete_flag')
m2m_changed.connect(uncache_flag, sender=Flag.users.through,
dispatch_uid='m2m_flag_users')
m2m_changed.connect(uncache_flag, sender=Flag.groups.through,
dispatch_uid='m2m_flag_groups')
def cache_sample(**kwargs):
sample = kwargs.get('instance')
cache.add(keyfmt(get_setting('SAMPLE_CACHE_KEY'), sample.name), sample)
cache.add(
keyfmt(get_setting('SAMPLE_SITES_CACHE_KEY'), sample.name),
sample.sites.all())
def uncache_sample(**kwargs):
sample = kwargs.get('instance')
cache.set(keyfmt(get_setting('SAMPLE_CACHE_KEY'), sample.name), None, 5)
cache.set(
keyfmt(get_setting('SAMPLE_SITES_CACHE_KEY'), sample.name), None, 5)
cache.set(keyfmt(get_setting('ALL_SAMPLES_CACHE_KEY')), None, 5)
post_save.connect(uncache_sample, sender=Sample, dispatch_uid='save_sample')
post_delete.connect(uncache_sample, sender=Sample,
dispatch_uid='delete_sample')
def cache_switch(**kwargs):
switch = kwargs.get('instance')
cache.add(keyfmt(get_setting('SWITCH_CACHE_KEY'), switch.name), switch)
cache.add(
keyfmt(get_setting('SWITCHES_SITES_CACHE_KEY'), switch.name),
switch.sites.all())
def uncache_switch(**kwargs):
switch = kwargs.get('instance')
cache.set(keyfmt(get_setting('SWITCH_CACHE_KEY'), switch.name), None, 5)
cache.set(
keyfmt(get_setting('SWITCHES_SITES_CACHE_KEY'), switch.name), None, 5)
cache.set(keyfmt(get_setting('ALL_SWITCHES_CACHE_KEY')), None, 5)
post_delete.connect(uncache_switch, sender=Switch,
dispatch_uid='delete_switch')
post_save.connect(uncache_switch, sender=Switch, dispatch_uid='save_switch')
```
|
{
"source": "jelovirt/dita-generator",
"score": 2
}
|
#### File: src/ditagen/cli.py
```python
import sys
import ditagen.dita
import ditagen.dtdgen
from ditagen.dita.v1_2 import *
from ditagen.dita.v1_1 import *
import ditagen.generator
from optparse import OptionParser, OptionGroup
#import urllib
#class UrnDitaGenerator(ditagen.generator.PluginGenerator):
#
# @staticmethod
# def generate_public_identifier(ext, id, dita_version, title, owner=None, suffix=None):
# """Generate URN public formal indentifier."""
# if owner != None and owner != u"OASIS":
# __ENTITY_MAP = {
# "dtd": u"doctypes",
# "ent": u"entities",
# "mod": u"modules"
# }
# desc = ["urn"]
# if owner is None:
# desc.extend([u"oasis", u"names", u"tc", u"dita"])
# else:
# desc.append(urllib.quote(owner))
# desc.append(urllib.quote(id))
# desc.append(__ENTITY_MAP[ext])
# if suffix != None:
# desc.append(urllib.quote(suffix))
# if dita_version != None and dita_version != "":
# desc.append(dita_version.strip())
# return u":".join(desc).lower()
# else:
# return ditagen.generator.DitaGenerator.generate_public_identifier(ext, id, dita_version, title, owner, suffix)
def main():
"""Main method."""
__topic_type = None
__parent_topic_type = None
__remove = {}
__global_atts = None
__domains = []
# new arguments
__parser = OptionParser(usage="usage: %prog [options] type topic id title [root]",
description="DITA Generator.")
__parser.add_option("-d", "--domain", action="append", dest="domains",
        help="Add domain DOMAIN. Multiple occurrences allowed.", metavar="DOMAIN")
__parser.add_option("-v", "--version", dest="version", choices=("1.1", "1.2"),
help="DITA version. Defaults to 1.1.", metavar="VERSION")
__parser.set_defaults(version="1.1")
__parser.add_option("-o", "--owner", dest="owner",
help="Owner in FPI.", metavar="OWNER")
__parser.add_option("-u", "--system-identifier", dest="system_identifier",
help="System identifier base URI.", metavar="SYSTEM_IDENTIFIER")
__parser.add_option("-s", "--stylesheet", action="append", dest="stylesheet", choices=("docbook", "eclipse.plugin", "fo", "rtf", "xhtml"),
        help="Stylesheet skeleton. Multiple occurrences allowed.", metavar="STYLE")
__parser.add_option("--plugin-name", dest="plugin_name",
help="Plugin name. Defaults to plugin ID.", metavar="PLUGIN_NAME")
__parser.add_option("--plugin-version", dest="plugin_version",
help="Plugin version", metavar="PLUGIN_VERSION")
__parser.add_option("-n", "--nested", dest="nested", action="store_true",
help="Support nested topics.")
__parser.set_defaults(nested=False)
__group = OptionGroup(__parser, "Advanced Options")
__group.add_option("--format", dest="format", choices=("dtd", "mod", "ent", "plugin"),
        help="Output format, one of dtd, mod, ent, plugin. Defaults to plugin.", metavar="FORMAT")
__parser.set_defaults(format="plugin")
__parser.add_option_group(__group)
(options, args) = __parser.parse_args()
# read arguments
if len(args) >= 1:
if args[0] in ditagen.OUTPUT_MAP:
__topic_type_class = ditagen.OUTPUT_MAP[args[0]]
else:
__parser.error("output type %s not found, supported types: %s."
% (args[0], ", ".join(ditagen.OUTPUT_MAP.keys())))
else:
__parser.error("output type not set")
if len(args) >= 2:
if args[1] in ditagen.TOPIC_MAP[options.version]:
__parent_topic_type = ditagen.TOPIC_MAP[options.version][args[1]]()
else:
__parser.error("topic type %s not found, supported topics: %s."
                % (args[1], ", ".join(ditagen.TOPIC_MAP[options.version].keys())))
else:
__parser.error("topic not set")
if len(args) >= 3:
options.id = args[2]
else:
__parser.error("id not set")
if len(args) >= 4:
options.title = args[3]
else:
__parser.error("title not set")
if len(args) >= 5:
options.root = args[4]
elif (args[0] == "specialization"):
__parser.error("root not set")
if options.domains != None:
for __d in options.domains:
if __d in ditagen.DOMAIN_MAP[options.version]:
__domains.append(ditagen.DOMAIN_MAP[options.version][__d]())
else:
                __parser.error("domain %s not found, supported domains: %s." % (__d, ", ".join(ditagen.DOMAIN_MAP[options.version].keys())))
#if hasattr(options, "root") and options.root is not None:
__topic_type = __topic_type_class(options.id, options.title, __parent_topic_type,
options.owner, file=options.id) #options.root
if type(__topic_type) == ditagen.dita.SpecializationType:
__topic_type.root = ditagen.dita.create_element(__topic_type, options.root, options.id)
#elif options.format in ("mod", "ent", "zip"):
# __parser.error("cannot generate %s for base topic type.".format(options.format))
# run generator
if options.format == u"plugin":
#__dita_gen = UrnDitaGenerator()
__dita_gen = ditagen.generator.PluginGenerator()
__dita_gen.out = sys.stdout
__dita_gen.topic_type = __topic_type
__dita_gen.domains = __domains
__dita_gen.nested = options.nested
__dita_gen.version = options.version
#if hasattr(options, "title") and options.title:
# __dita_gen.set_title(options.title)
if options.stylesheet:
__dita_gen.set_stylesheet(options.stylesheet)
if options.plugin_name:
__dita_gen.plugin_name = options.plugin_name
if options.plugin_version:
__dita_gen.plugin_version = options.plugin_version
#__dita_gen.generate_public_identifier = generate_urn_identifier
__dita_gen.generate_plugin()
else:
__dita_gen = ditagen.generator.DitaGenerator()
__dita_gen.out = sys.stdout
__dita_gen.topic_type = __topic_type
__dita_gen.domains = __domains
__dita_gen.nested = options.nested
__dita_gen.version = options.version
#if hasattr(options, "title") and options.title:
# __dita_gen.set_title(options.title)
if options.format == u"dtd":
#__file_name = __dita_gen.get_file_name(__topic_type, __root, __format)
__dita_gen.generate_dtd()
elif options.format == u"mod":
#__file_name = __dita_gen.get_file_name(__topic_type, __root, __format)
__dita_gen.generate_mod()
elif options.format == u"ent":
#__file_name = __dita_gen.get_file_name(__topic_type, __root, __format)
__dita_gen.generate_ent()
#elif options.format == u"zip":
# #__file_name = __dita_gen.get_file_name(__topic_type, __root, "zip")
# __dita_gen.generate_zip(sys.stdout)
#elif __format == u"tgz":
# __file_name = __dita_gen.get_file_name(__topic_type, __root, "tar.gz")
# __dita_gen.generate_zip(sys.stdout, __topic_type, __domains, __root, __owner, __nested, __remove, __global_atts)
#elif __format == u"xzip":
# __file_name = __dita_gen.get_file_name(__topic_type, __root, "zip")
# zip_dita_gen = ditagen.generator.ZipGenerator(sys.stdout)
# zip_dita_gen.generate_zip(sys.stdout, __topic_type, __domains, __root, __owner, __nested)
if __name__ == "__main__":
main()
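# Example invocation (illustrative only; the valid type/topic/domain keys come
# from ditagen.OUTPUT_MAP, TOPIC_MAP and DOMAIN_MAP for the selected version):
#   python cli.py -v 1.2 -d hi-d -o example.com specialization topic mytopic "My Topic" mytopic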
```
#### File: ditagen/dita/d4p.py
```python
import ditagen.dita
from ditagen.dtdgen import Attribute as Attribute
from ditagen.dtdgen import ParameterEntity as ParameterEntity
import ditagen.dita.v1_2
from ditagen.dita.v1_2 import TopicType as TopicType
from ditagen.dita.v1_2 import MapType as MapType
from ditagen.dita.v1_2 import MapGroupDomain as MapGroupDomain
from ditagen.dita.v1_2 import IndexingDomain as IndexingDomain
from ditagen.dita.v1_2 import HiDomain as HiDomain
# Elements
#####################################################################
class ArticleElement(ditagen.dita.DitaElement):
"""Article element."""
name = u"article"
cls = u"- topic/topic article/article "
    model = """(%%title;),
(%%titlealts;)?,
(%%abstract; | %%deck;)?,
(%%prolog;)?,
(%%body;)?,
(%%related-links;)?,
%(nested)s"""
attrs = [
Attribute("id", "ID", "#REQUIRED"),
Attribute("conref", "CDATA", "#IMPLIED"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
ParameterEntity("arch-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class SubsectionElement(ditagen.dita.DitaElement):
"""Subsection element."""
name = u"subsection"
cls = u"- topic/topic subsection/subsection "
    model = """(%%title;),
(%%titlealts;)?,
(%%abstract; | %%shortdesc;)?,
(%%prolog;)?,
(%%body;)?,
(%%related-links;)?,
%(nested)s"""
attrs = [
Attribute("id", "ID", "#REQUIRED"),
Attribute("conref", "CDATA", "#IMPLIED"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
ParameterEntity("arch-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class SidebarElement(ditagen.dita.DitaElement):
"""Sidebar element."""
name = u"sidebar"
cls = u"- topic/topic sidebar/sidebar "
    model = """(%%title;),
(%%titlealts;)?,
(%%abstract; | %%shortdesc;)?,
(%%prolog;)?,
(%%body;)?,
(%%related-links;)?,
%(nested)s"""
attrs = [
Attribute("id", "ID", "#REQUIRED"),
Attribute("conref", "CDATA", "#IMPLIED"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
ParameterEntity("arch-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class ChapterElement(ditagen.dita.DitaElement):
"""Chapter element."""
name = u"chapter"
cls = u"- topic/topic chapter/chapter "
    model = """(%%title;),
(%%titlealts;)?,
(%%abstract; | %%shortdesc;)?,
(%%prolog;)?,
(%%body;)?,
(%%related-links;)?,
%(nested)s"""
attrs = [
Attribute("id", "ID", "#REQUIRED"),
Attribute("conref", "CDATA", "#IMPLIED"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
ParameterEntity("arch-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class PartElement(ditagen.dita.DitaElement):
"""Part element."""
name = u"part"
cls = u"- topic/topic part/part "
    model = """(%%title;),
(%%titlealts;)?,
(%%abstract; | %%shortdesc;)?,
(%%prolog;)?,
(%%body;)?,
(%%related-links;)?,
%(nested)s"""
attrs = [
Attribute("id", "ID", "#REQUIRED"),
Attribute("conref", "CDATA", "#IMPLIED"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
ParameterEntity("arch-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class CoverElement(ditagen.dita.DitaElement):
"""Cover element."""
name = u"cover"
cls = u"- topic/topic cover/cover "
    model = """(%%title;),
(%%titlealts;)?,
(%%abstract; | %%shortdesc;)?,
(%%prolog;)?,
(%%body;)?,
(%%related-links;)?,
%(nested)s"""
attrs = [
Attribute("id", "ID", "#REQUIRED"),
Attribute("conref", "CDATA", "#IMPLIED"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
ParameterEntity("arch-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class PubmapElement(ditagen.dita.DitaElement):
"""Pubmap element."""
name = u"pubmap"
cls = u"- map/map pubmap/pubmap "
model = """(%%pubtitle;)?,
(%%pubmeta;)?,
(%%keydefs;)?,
(%%topicref;)*,
((%%mapref;) |
((%%publication;) |
(%%publication-mapref;))|
((%%covers;)?,
(%%colophon;)?,
((%%frontmatter;) |
(%%department;) |
(%%page;))*,
((%%pubbody;) |
(%%part;) |
(%%chapter;) |
(%%sidebar;) |
(%%subsection;))?,
((%%appendixes;) |
(%%appendix;) |
(%%backmatter;) |
(%%page;) |
(%%department;) |
(%%colophon;))*)),
(%%data.elements.incl; |
%%reltable;)*"""
attrs = [
Attribute("title", "CDATA", "#IMPLIED"),
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
Attribute("anchorref", "CDATA", "#IMPLIED"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
ParameterEntity("localization-atts"),
ParameterEntity("topicref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("arch-atts"),
]
# Topic types
#####################################################################
class ArticleType(TopicType):
"""Article topic type."""
id = u"article"
file = u"article"
pi_entity = None
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:article"
title = u"Article"
owner = u"DITA 4 Publishers"
parent = TopicType()
root = ArticleElement()
class SubsectionType(TopicType):
"""Subsection topic type."""
id = u"subsection"
file = u"subsection"
pi_entity = None
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:subsection"
title = u"Subsection"
owner = u"DITA 4 Publishers"
parent = TopicType()
root = SubsectionElement()
class SidebarType(TopicType):
"""Sidebar topic type."""
id = u"sidebar"
file = u"sidebar"
pi_entity = None
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:sidebar"
title = u"Sidebar"
owner = u"DITA 4 Publishers"
parent = TopicType()
root = SidebarElement()
class ChapterType(TopicType):
"""Chapter topic type."""
id = u"chapter"
file = u"chapter"
pi_entity = None
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:chapter"
title = u"Chapter"
owner = u"DITA 4 Publishers"
parent = TopicType()
root = ChapterElement()
class PartType(TopicType):
"""Part topic type."""
id = u"part"
file = u"part"
pi_entity = None
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:part"
title = u"Part"
owner = u"DITA 4 Publishers"
parent = TopicType()
root = PartElement()
class CoverType(TopicType):
"""Cover topic type."""
id = u"d4pCover"
file = u"d4pCover"
pi_entity = None
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:d4pCover"
title = u"Cover"
owner = u"DITA 4 Publishers"
parent = TopicType()
root = CoverElement()
class PubmapType(MapType):
"""Pub map type."""
id = u"pubmap"
file = u"pubmap"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:entities:dtd:pubmap"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:dtd:pubmap"
title = u"pubmap"
owner = u"DITA 4 Publishers"
parent = MapType()
root = PubmapElement()
ArticleType.required_types = [SubsectionType, SidebarType]
SubsectionType.required_types = [SidebarType]
SidebarType.required_types = [SubsectionType]
ChapterType.required_types = [SubsectionType, SidebarType]
#class ArticleType(ditagen.dita.ShellType):
# """Article Task topic type."""
# def __init__(self):
# super(ArticleType, self).__init__(u"article", u"Article", TopicType(), file=u"article")
# self.pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:article"
# self.owner = u"DITA 4 Publishers"
# Domains
#####################################################################
class FormattingDomain(ditagen.dita.Domain):
"""DITA For Publishers Formatting Domain."""
# TODO: Requires hi-d
id = u"d4p_formatting-d"
si_module = u"d4p_formattingDomain.mod"
si_entity = u"d4p_formattingDomain.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_formattingDomain"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_formattingDomain:entities"
title = u"Formatting"
elements = [u"ph", u"p", u"foreign"]
parent = [TopicType]
required_domains = [HiDomain]
class EnumerationTopicDomain(ditagen.dita.Domain):
"""DITA For Publishers Enumeration Domain."""
id = u"d4p_enumerationTopic-d"
si_module = u"d4p_enumerationTopic.mod"
si_entity = u"d4p_enumerationTopic.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:dita:modules:d4p_enumerationTopicDomain"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:entities:dtd:dita:d4p_enumerationTopicDomain:entities"
title = u"Enumeration"
elements = [u"data"]
parent = [TopicType]
class EnumerationMapDomain(ditagen.dita.Domain):
"""DITA For Publishers Enumeration Domain."""
id = u"d4p_enumerationMap-d"
si_module = u"d4p_enumerationMap.mod"
si_entity = u"d4p_enumerationMap.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:dita:modules:d4p_enumerationMap"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:entities:dtd:dita:d4p_enumerationMap:entities"
title = u"Enumeration"
elements = [u"topicref"]
parent = [MapType]
class SimpleEnumerationDomain(ditagen.dita.Domain):
"""DITA For Publishers Simple Enumeration Domain."""
id = u"d4p_simpleEnumeration-d"
si_module = u"d4p_simpleEnumeration.mod"
si_entity = u"d4p_simpleEnumeration.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:dita:modules:d4p_simpleEnumerationDomain"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:entities:dtd:dita:d4p_simpleEnumerationDomain:entities"
title = u"Simple Enumeration"
elements = [u"data"]
parent = [TopicType]
class MathDomain(ditagen.dita.Domain):
"""DITA For Publishers Math Domain."""
id = u"d4p_math-d"
si_module = u"d4p_mathDomain.mod"
si_entity = u"d4p_mathDomain.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_mathDomain"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_mathDomain:entities"
title = u"Math"
elements = [u"ph", u"p", u"fig", u"foreign"]
parent = [TopicType]
class MediaDomain(ditagen.dita.Domain):
"""DITA For Publishers Media Domain."""
id = u"d4p_media-d"
si_module = u"d4p_mediaDomain.mod"
si_entity = u"d4p_mediaDomain.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_mediaDomain"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_mediaDomain:entities"
title = u"Media"
elements = [u"object"]
parent = [TopicType]
class ClassificationDomain(ditagen.dita.Domain):
"""DITA For Publishers Classification Domain."""
id = u"d4p_classification-d"
si_module = u"d4p_classification.mod"
si_entity = u"d4p_classification.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_classificationDomain"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_classificationDomain:entities"
title = u"Classification"
elements = [u"data"]
parent = [TopicType]
class PubcontentDomain(ditagen.dita.Domain):
"""DITA For Publishers Pubcontent Domain."""
id = u"d4p_pubcontent-d"
si_module = u"d4p_pubcontent.mod"
si_entity = u"d4p_pubcontent.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_pubcontentDomain"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_pubcontentDomain:entities"
title = u"Pubcontent"
elements = [u"p", u"bodydiv", u"sectiondiv"]
parent = [TopicType]
class RubyDomain(ditagen.dita.Domain):
"""DITA For Publishers Ruby Domain."""
id = u"d4p_ruby-d"
si_module = u"d4p_ruby.mod"
si_entity = u"d4p_ruby.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_rubyDomain"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_rubyDomain:entities"
title = u"Ruby"
elements = [u"ph"]
parent = [TopicType]
class VariablesDomain(ditagen.dita.Domain):
"""DITA For Publishers Variables Domain."""
id = u"d4p_variables-d"
si_module = u"d4p_variables.mod"
si_entity = u"d4p_variables.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_variablesDomain"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_variablesDomain:entities"
title = u"Variables"
elements = [u"data", u"text", u"keyword"]
parent = [TopicType]
class VerseDomain(ditagen.dita.Domain):
"""DITA For Publishers Verse Domain."""
id = u"d4p_verse-d"
si_module = u"d4p_verse.mod"
si_entity = u"d4p_verse.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_verseDomain"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_verseDomain:entities"
title = u"Verse"
elements = [u"lines"]
parent = [TopicType]
class XmlDomain(ditagen.dita.Domain):
"""DITA For Publishers XML Domain."""
id = u"xml-d"
si_module = u"xml.mod"
si_entity = u"xml.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:xml:declarations"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:xml:entities"
title = u"XML"
elements = [u"keyword"]
parent = [TopicType]
class PubmapDomain(ditagen.dita.Domain):
"""DITA For Publishers Pubmap Domain."""
id = u"pubmap-d"
si_module = u"pubmap.mod"
si_entity = u"pubmap.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:dtd:pubmapDomain"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:entities:dtd:pubmapDomain"
title = u"Pubmap"
attributes = [u"topicref", u"title"]
parent = [MapType]
class PubmapMaprefDomain(ditagen.dita.Domain):
"""DITA For Publishers Pubmap Mapref Domain."""
id = u"pubmapMapref-d"
si_module = u"pubmapMapref.mod"
si_entity = u"pubmapMapref.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:dtd:pubmapMaprefDomain"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:entities:dtd:pubmapMaprefDomain"
title = u"PubmapMapref"
attributes = [u"topicref", u"appendix", u"appendixes", u"article", u"backmatter", u"chapter", u"covers", u"department", u"glossary", u"keydef-group", u"part", u"pubbody", u"publication", u"subsection", u"sidebar", u"wrap-cover"]
parent = [MapType]
class PubmetadataDomain(ditagen.dita.Domain):
"""DITA For Publishers Pubmetadata Domain."""
id = u"pubmetadata-d"
si_module = u"pubmetadata.mod"
si_entity = u"pubmetadata.ent"
pi_module = u"urn:pubid:dita4publishers.sourceforge.net:modules:dtd:pubmetadataDomain"
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:entities:dtd:pubmetadataDomain"
title = u"Pubmetadata"
attributes = [u"topicmeta"]
parent = [MapType]
class RenditionTargetAttDomain(ditagen.dita.v1_2.AttributeDomain):
"""DITA For Publishers Rendition Target Attribute Domain."""
id = u"d4p_renditionTargetAtt-d"
si_module = None
si_entity = u"d4p_renditionTarget.ent"
pi_module = None
pi_entity = u"urn:pubid:dita4publishers.sourceforge.net:doctypes:dita:modules:d4p_renditionTargetAttDomain:entities"
title = u"Rendition Target Attribute"
attributes = [u"props"]
parent = [TopicType]
# Defaults
__commonDomains = [FormattingDomain, EnumerationTopicDomain, SimpleEnumerationDomain, MathDomain, MediaDomain, ClassificationDomain, PubcontentDomain, RubyDomain, VariablesDomain, VerseDomain, XmlDomain, RenditionTargetAttDomain]
ArticleType.default_domains = __commonDomains
ChapterType.default_domains = __commonDomains
CoverType.default_domains = __commonDomains
PartType.default_domains = __commonDomains
SidebarType.default_domains = __commonDomains
SubsectionType.default_domains = __commonDomains
PubmapType.default_domains = [MapGroupDomain, PubmapDomain, PubmapMaprefDomain, PubmetadataDomain, EnumerationMapDomain, SimpleEnumerationDomain, VariablesDomain, IndexingDomain]
```
#### File: ditagen/dita/v1_2.py
```python
import ditagen.dita
from ditagen.dtdgen import Particle as Particle
from ditagen.dtdgen import Choice as Choice
from ditagen.dtdgen import Name as Name
from ditagen.dtdgen import Seq as Seq
from ditagen.dtdgen import Attribute as Attribute
from ditagen.dtdgen import Param as Param
from ditagen.dtdgen import ParameterEntity as ParameterEntity
# Elements
#####################################################################
OPTIONAL = Particle.Occurrences.OPTIONAL
ZERO_OR_MORE = Particle.Occurrences.ZERO_OR_MORE
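# Occurrence shorthands used by the content models below; they correspond to
# the DTD "?" and "*" occurrence indicators.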
class TopicElement(ditagen.dita.DitaElement):
"""Topic element."""
name = u"topic"
cls = u"- topic/topic "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("body"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class ConceptElement(ditagen.dita.DitaElement):
"""Concept element."""
name = u"concept"
cls = u"- topic/topic concept/concept "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("conbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class TaskElement(ditagen.dita.DitaElement):
"""Task element."""
name = u"task"
cls = u"- topic/topic task/task "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("taskbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class ReferenceElement(ditagen.dita.DitaElement):
"""Reference element."""
name = u"reference"
cls = u"- topic/topic reference/reference "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("refbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class GlossentryElement(ditagen.dita.DitaElement):
"""Glossary entry element."""
name = u"glossentry"
cls = u"- topic/topic concept/concept glossentry/glossentry "
model = Seq([
Choice(ParameterEntity("glossterm")),
Choice(ParameterEntity("glossdef"), OPTIONAL),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("glossBody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class GlossgroupElement(ditagen.dita.DitaElement):
"""Glossary group element."""
name = u"glossgroup"
cls = u"- topic/topic concept/concept glossgroup/glossgroup "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class LearningBaseElement(ditagen.dita.DitaElement):
"""Learning Base element."""
name = u"learningBase"
cls = u"- topic/topic learningBase/learningBase "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningBasebody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class LearningAssessmentElement(ditagen.dita.DitaElement):
"""Learning Assessment element."""
name = u"learningAssessment"
cls = u"- topic/topic learningBase/learningBase learningAssessment/learningAssessment "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningAssessmentbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningOverviewElement(ditagen.dita.DitaElement):
"""Learning Overview element."""
name = u"learningOverview"
cls = u"- topic/topic learningBase/learningBase learningOverview/learningOverview "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningOverviewbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
        Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningPlanElement(ditagen.dita.DitaElement):
"""Learning Plan element."""
name = u"learningPlan"
cls = u"- topic/topic learningBase/learningBase learningPlan/learningPlan "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningPlanbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
        Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningSummaryElement(ditagen.dita.DitaElement):
"""Learning Summary element."""
name = u"learningSummary"
cls = u"- topic/topic learningBase/learningBase learningSummary/learningSummary "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningSummarybody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
        Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningContentElement(ditagen.dita.DitaElement):
"""Learning Content element."""
name = u"learningContent"
cls = u"- topic/topic learningBase/learningBase learningContent/learningContent "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningContentbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
        Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class SubjectSchemeElement(ditagen.dita.DitaElement):
"""Subject scheme element."""
name = u"subjectScheme"
cls = u"- map/map subjectScheme/subjectScheme "
model = Seq([
Choice(ParameterEntity("title"), OPTIONAL),
Choice(ParameterEntity("topicmeta"), OPTIONAL),
Choice([
ParameterEntity("anchor"),
ParameterEntity("data.elements.incl"),
ParameterEntity("enumerationdef"),
ParameterEntity("hasInstance"),
ParameterEntity("hasKind"),
ParameterEntity("hasNarrower"),
ParameterEntity("hasPart"),
ParameterEntity("hasRelated"),
ParameterEntity("navref"),
ParameterEntity("relatedSubjects"),
ParameterEntity("reltable"),
ParameterEntity("schemeref"),
ParameterEntity("subjectdef"),
ParameterEntity("subjectHead"),
ParameterEntity("subjectRelTable"),
ParameterEntity("topicref")
], ZERO_OR_MORE)
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
Attribute("anchorref", "CDATA", "#IMPLIED"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
ParameterEntity("localization-atts"),
ParameterEntity("topicref-atts"),
ParameterEntity("select-atts")
]
class MapElement(ditagen.dita.DitaElement):
"""Map element."""
name = u"map"
cls = u"- map/map "
model = Seq([
Choice(ParameterEntity("title"), OPTIONAL),
Choice(ParameterEntity("topicmeta"), OPTIONAL),
Choice([
ParameterEntity("anchor"),
ParameterEntity("data.elements.incl"),
ParameterEntity("navref"),
ParameterEntity("reltable"),
ParameterEntity("topicref")
], ZERO_OR_MORE)
])
attrs = [
Attribute("title", "CDATA", "#IMPLIED"),
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
Attribute("anchorref", "CDATA", "#IMPLIED"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
ParameterEntity("localization-atts"),
ParameterEntity("topicref-atts"),
ParameterEntity("select-atts")
]
class BookMapElement(ditagen.dita.DitaElement):
"""BookMap element."""
name = u"bookmap"
cls = u"- map/map bookmap/bookmap "
model = Seq([
Choice([Choice(ParameterEntity("title")), Choice(ParameterEntity("booktitle"))], OPTIONAL),
Choice(ParameterEntity("bookmeta"), OPTIONAL),
Choice(ParameterEntity("frontmatter"), OPTIONAL),
Choice(ParameterEntity("chapter"), ZERO_OR_MORE),
Choice(ParameterEntity("part"), ZERO_OR_MORE),
Choice([Choice(ParameterEntity("appendices"), OPTIONAL), Choice(ParameterEntity("appendix"), ZERO_OR_MORE)]),
Choice(ParameterEntity("backmatter"), OPTIONAL),
Choice(ParameterEntity("reltable"), ZERO_OR_MORE)
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
Attribute("anchorref", "CDATA", "#IMPLIED"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
ParameterEntity("localization-atts"),
ParameterEntity("topicref-atts"),
ParameterEntity("select-atts")
]
# Topic types
#####################################################################
class TopicType(ditagen.dita.Type):
"""Topic topic type."""
id = u"topic"
file = u"base/dtd/topic" # the .dtd file is at technicalContent
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Topic//EN"
title = u"Topic"
parent = None
root = TopicElement()
class ConceptType(TopicType):
"""Concept topic type."""
id = u"concept"
file = u"technicalContent/dtd/concept"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Concept//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Concept//EN"
title = u"Concept"
parent = TopicType()
root = ConceptElement()
class TaskType(TopicType):
"""Task topic type."""
id = u"task"
file = u"technicalContent/dtd/task"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Task//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Task//EN"
title = u"Task"
parent = TopicType()
root = TaskElement()
def __init__(self):
super(TaskType, self).__init__()
#self.required_domains = [StrictTaskbodyConstraints]
class GeneralTaskType(ditagen.dita.ShellType):
"""General Task topic type."""
def __init__(self):
super(GeneralTaskType, self).__init__(u"generalTask", u"General Task", TaskType())
#self.parent.required_domains = []
class ReferenceType(TopicType):
"""Reference topic type."""
id = u"reference"
file = u"technicalContent/dtd/reference"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Reference//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Reference//EN"
title = u"Reference"
parent = TopicType()
root = ReferenceElement()
class MapType(ditagen.dita.Type):
"""Map topic type."""
id = u"map"
file = u"base/dtd/map" # the .dtd file is at technicalContent
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Map//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Map//EN"
title = u"Map"
parent = None
root = MapElement()
class BookMapType(MapType):
"""BookMap topic type."""
id = u"bookmap"
file = u"bookmap/dtd/bookmap"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 BookMap//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 BookMap//EN"
title = u"BookMap"
parent = MapType()
root = BookMapElement()
class GlossentryType(ConceptType):
"""Glossary entry topic type."""
id = u"glossentry"
file = u"technicalContent/dtd/glossentry"
pi_entity = u"-//OASIS//ENTITIES DITA Glossary Entry//EN"
pi_module = u"-//OASIS//ELEMENTS DITA Glossary Entry//EN"
title = u"Glossary Entry"
parent = ConceptType()
root = GlossentryElement()
class GlossgroupType(ConceptType):
"""Glossary group topic type."""
id = u"glossgroup"
file = u"technicalContent/dtd/glossgroup"
pi_entity = u"-//OASIS//ENTITIES DITA Glossary Group//EN"
pi_module = u"-//OASIS//ELEMENTS DITA Glossary Group//EN"
title = u"Glossary Group"
parent = ConceptType()
root = GlossgroupElement()
class MachineryTaskType(ditagen.dita.ShellType):
"""Machinery Task topic type."""
def __init__(self):
super(MachineryTaskType, self).__init__(u"machineryTask", u"Machinery Task", TaskType(), file=u"machineryIndustry/dtd/machineryTask")
#self.parent.required_domains = [MachineryTaskbodyConstraints]
class LearningBaseType(TopicType):
"""Learning Base topic type."""
id = u"learningBase"
file = u"learning/dtd/learningBase"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Base//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Base//EN"
title = u"Learning Base"
parent = TopicType()
root = LearningBaseElement()
class LearningAssessmentType(LearningBaseType):
"""Learning Assessment topic type."""
id = u"learningAssessment"
file = u"learning/dtd/learningAssessment"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Assessment//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Assessment//EN"
title = u"Learning Assessment"
parent = LearningBaseType()
root = LearningAssessmentElement()
class LearningOverviewType(LearningBaseType):
"""Learning Overview topic type."""
id = u"learningOverview"
file = u"learning/dtd/learningOverview"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Overview//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Overview//EN"
title = u"Learning Overview"
parent = LearningBaseType()
root = LearningOverviewElement()
class LearningPlanType(LearningBaseType):
"""Learning Plan topic type."""
id = u"learningPlan"
file = u"learning/dtd/learningPlan"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Plan//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Plan//EN"
title = u"Learning Plan"
parent = LearningBaseType()
root = LearningPlanElement()
class LearningSummaryType(LearningBaseType):
"""Learning Summary topic type."""
id = u"learningSummary"
file = u"learning/dtd/learningSummary"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Summary//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Summary//EN"
title = u"Learning Summary"
parent = LearningBaseType()
root = LearningSummaryElement()
class LearningContentType(LearningBaseType):
"""Learning Content topic type."""
id = u"learningContent"
file = u"learning/dtd/learningContent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Content//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Content//EN"
title = u"Learning Content"
parent = LearningBaseType()
root = LearningContentElement()
def __init__(self):
super(LearningContentType, self).__init__()
self.required_types = [TaskType, ConceptType, ReferenceType, LearningSummaryType, LearningAssessmentType]
class LearningMapType(ditagen.dita.ShellType):
"""Learning Map topic type."""
def __init__(self):
super(LearningMapType, self).__init__(u"learningMap", u"Learning Map", MapType(), file=u"learning/dtd/learningMap")
#self.parent.required_domains = []
class LearningBookMapType(ditagen.dita.ShellType):
"""Learning BookMap topic type."""
def __init__(self):
super(LearningBookMapType, self).__init__(u"learningBookmap", u"Learning BookMap", BookMapType(), file=u"learning/dtd/learningBookmap")
#self.parent.required_domains = []
class ClassificationMapType(ditagen.dita.ShellType):
"""Classification Map topic type."""
def __init__(self):
super(ClassificationMapType, self).__init__(u"classifyMap", u"Classification Map", MapType(), file=u"subjectScheme/dtd/classifyMap")
#self.parent.required_domains = []
class SubjectSchemeType(MapType):
"""Subject Scheme Map topic type."""
id = u"subjectScheme"
file = u"subjectScheme/dtd/subjectScheme"
title = u"Subject Scheme Map"
parent = MapType()
root = SubjectSchemeElement()
# Domains
#####################################################################
class Constraints(ditagen.dita.DomainBase):
"""Base class for constraints."""
# file_suffix = u""
pi_suffix = u" Constraint"
elements = []
att_id = None
def get_file_name(self, extension):
return self.file + self.file_suffix + "." + extension
class AttributeDomain(ditagen.dita.DomainBase):
"""Base class for attribute domains."""
# file_suffix = u"Att"
pi_suffix = u" Attribute Domain"
#elements = []
attributes = []
def get_file_name(self, extension):
return self.file + self.file_suffix + "." + extension
# Domains
class UiDomain(ditagen.dita.Domain):
"""User interface domain."""
id = u"ui-d"
si_module = u"technicalContent/dtd/uiDomain.mod"
si_entity = u"technicalContent/dtd/uiDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 User Interface Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 User Interface Domain//EN"
title = u"User Interface"
elements = [u"pre", u"keyword", u"ph"]
parent = [TopicType]
class HiDomain(ditagen.dita.Domain):
    """Highlight domain."""
id = u"hi-d"
si_module = u"base/dtd/highlightDomain.mod"
si_entity = u"base/dtd/highlightDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Highlight Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Highlight Domain//EN"
title = u"Highlight"
elements = [u"ph"]
parent = [TopicType]
class PrDomain(ditagen.dita.Domain):
    """Programming domain."""
id = u"pr-d"
si_module = u"technicalContent/dtd/programmingDomain.mod"
si_entity = u"technicalContent/dtd/programmingDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Programming Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Programming Domain//EN"
title = u"Programming"
elements = [u"pre", u"keyword", u"ph", u"fig", u"dl"]
parent = [TopicType]
class SwDomain(ditagen.dita.Domain):
"""Software development domain."""
id = u"sw-d"
si_module = u"technicalContent/dtd/softwareDomain.mod"
si_entity = u"technicalContent/dtd/softwareDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Software Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Software Domain//EN"
title = u"Software"
elements = [u"pre", u"keyword", u"ph"]
parent = [TopicType]
class UtDomain(ditagen.dita.Domain):
"""Utilities domain."""
id = u"ut-d"
si_module = u"base/dtd/utilitiesDomain.mod"
si_entity = u"base/dtd/utilitiesDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Utilities Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Utilities Domain//EN"
title = u"Utilities"
elements = [u"fig"]
parent = [TopicType]
class IndexingDomain(ditagen.dita.Domain):
"""Indexing domain."""
id = u"indexing-d"
si_module = u"base/dtd/indexingDomain.mod"
si_entity = u"base/dtd/indexingDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Indexing Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Indexing Domain//EN"
title = u"Indexing"
elements = [u"index-base"]
parent = [TopicType, MapType]
class LearningDomain(ditagen.dita.Domain):
"""Learning domain."""
id = u"learning-d"
si_module = u"learning/dtd/learningDomain.mod"
si_entity = u"learning/dtd/learningDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Domain//EN"
title = u"Learning"
elements = [u"note", u"fig"]
    # XXX: This builds on the utilities domain (fig), hence UtDomain in required_domains below.
parent = [TopicType]
required_domains = [UtDomain]
class LearningMetaDomain(ditagen.dita.Domain):
"""Learning metadata domain."""
id = u"learningmeta-d"
si_module = u"learning/dtd/learningMetadataDomain.mod"
si_entity = u"learning/dtd/learningMetadataDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Metadata Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Metadata Domain//EN"
title = u"Learning Metadata"
elements = [u"metadata"]
parent = [TopicType]
class LearningMapDomain(ditagen.dita.Domain):
"""Learning map domain."""
id = u"learningmap-d"
si_module = u"learning/dtd/learningMapDomain.mod"
si_entity = u"learning/dtd/learningMapDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Map Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Map Domain//EN"
title = u"Learning Map"
elements = [u"topicref"]
parent = [MapType]
class TaskRequirementsDomain(ditagen.dita.Domain):
"""Task requirements domain."""
id = u"taskreq-d"
si_module = u"technicalContent/dtd/taskreqDomain.mod"
si_entity = u"technicalContent/dtd/taskreqDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Task Requirements Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Task Requirements Domain//EN"
title = u"Machine Industry Task"
elements = [u"prereq", u"postreq"]
parent = [TaskType]
class HazardStatementDomain(ditagen.dita.Domain):
"""Hazard statement domain."""
id = u"hazard-d"
si_module = u"base/dtd/hazardstatementDomain.mod"
si_entity = u"base/dtd/hazardstatementDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Hazard Statement Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Hazard Statement Domain//EN"
title = u"Hazard Statement"
elements = [u"note"]
parent = [TopicType]
class MapGroupDomain(ditagen.dita.Domain):
"""Map group domain."""
id = u"mapgroup-d"
si_module = u"base/dtd/mapGroup.mod"
si_entity = u"base/dtd/mapGroup.ent" # This is an exception to DITA's naming scheme
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Map Group Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Map Group Domain//EN"
title = u"Map Group"
elements = [u"topicref"]
parent = [MapType]
class AbbreviatedFormDomain(ditagen.dita.Domain):
"""Abbreviated form domain."""
id = u"abbrev-d"
si_module = u"technicalContent/dtd/abbreviateDomain.mod"
si_entity = u"technicalContent/dtd/abbreviateDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Abbreviated Form Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Abbreviated Form Domain//EN"
title = u"Abbreviated Form"
elements = [u"term"]
parent = [TopicType]
class XNALDomain(ditagen.dita.Domain):
"""XNAL domain."""
id = u"xnal-d"
si_module = u"xnal/dtd/xnalDomain.mod"
si_entity = u"xnal/dtd/xnalDomain.ent"
title = u"XNAL"
elements = [u"author"]
parent = [MapType]
class UserDelayedResolutionDomain(ditagen.dita.Domain):
"""User delayed resolution domain."""
id = u"delay-d"
si_module = u"base/dtd/delayResolutionDomain.mod"
si_entity = u"base/dtd/delayResolutionDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Delayed Resolution Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Delayed Resolution Domain//EN"
title = u"Delayed Resolution"
elements = [u"keywords"]
parent = [TopicType, MapType]
class ClassifyDomain(ditagen.dita.Domain):
"""Classify domain."""
id = u"classify-d"
si_module = u"subjectScheme/dtd/classifyDomain.mod"
si_entity = u"subjectScheme/dtd/classifyDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Classification Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Classification Domain//EN"
title = u"Map Subject Classification"
elements = [u"topicref", u"reltable"]
parent = [TopicType, MapType]
class GlossaryReferenceDomain(ditagen.dita.Domain):
"""Glossary reference domain."""
id = u"glossref-d"
si_module = u"technicalContent/dtd/glossrefDomain.mod"
si_entity = u"technicalContent/dtd/glossrefDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Glossary Reference Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Glossary Reference Domain//EN"
title = u"Glossary Reference"
elements = [u"topicref"]
parent = [MapType]
# Constraints
class StrictTaskbodyConstraints(Constraints):
"""Strict taskbody constraints."""
id = u"strictTaskbody-c"
si_module = u"technicalContent/dtd/strictTaskbodyConstraint.mod"
si_entity = u"technicalContent/dtd/strictTaskbodyConstraint.ent"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Strict Taskbody Constraint//EN"
title = u"Strict Taskbody"
parent = [TaskType]
att_id = u"taskbody"
class MachineryTaskbodyConstraints(Constraints):
"""Machinery taskbody constraints."""
id = u"machineryTaskbody-c"
si_module = u"machineryIndustry/dtd/machineryTaskbodyConstraint.mod"
si_entity = u"machineryIndustry/dtd/machineryTaskbodyConstraint.ent"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Machinery Taskbody Constraint//EN"
title = u"Machinery Taskbody"
parent = [TaskType]
att_id = u"taskbody"
# Defaults
TopicType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
ConceptType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
TaskType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain, StrictTaskbodyConstraints]
GeneralTaskType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
ReferenceType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
MachineryTaskType.default_domains = [TaskRequirementsDomain, HazardStatementDomain, HiDomain, UtDomain, IndexingDomain, PrDomain, SwDomain, UiDomain, MachineryTaskbodyConstraints]
MapType.default_domains = [MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, GlossaryReferenceDomain]
BookMapType.default_domains = [MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, XNALDomain]
ClassificationMapType.default_domains = [MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, ClassifyDomain]
SubjectSchemeType.default_domains = [MapGroupDomain]
LearningAssessmentType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningBookMapType.default_domains = [LearningMapDomain, LearningMetaDomain, MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, XNALDomain]
LearningContentType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningMapType.default_domains = [LearningMapDomain, LearningMetaDomain, MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain]
LearningOverviewType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningPlanType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningSummaryType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
GlossentryType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
GlossgroupType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
```
#### File: src/ditagen/generator_test.py
```python
import unittest
import ditagen.generator
import StringIO
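# Note: this suite targets Python 2 (it relies on the StringIO module); run it
# with "python generator_test.py" or via unittest discovery.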
class DtdGeneratorTestCase(unittest.TestCase):
def setUp(self):
self.generator = ditagen.generator.DtdGenerator()
def tearDown(self):
self.generator = None
def test_generate_pi(self):
self.assertEqual(self.generator.generate_pi(u"owner", u"description"),
u"-//owner//description//EN")
def test_external_general_entity_system(self):
__out = StringIO.StringIO()
self.generator.set_output(__out)
self.generator.external_general_entity(u"name", u"system")
self.assertEqual(__out.getvalue(),
u"""<!ENTITY name SYSTEM "system">\n""")
def test_external_general_entity_public(self):
__out = StringIO.StringIO()
self.generator.set_output(__out)
self.generator.external_general_entity(u"name", u"system", u"public")
self.assertEqual(__out.getvalue(),
u"""<!ENTITY name PUBLIC "public" "system">\n""")
def test_internal_general_entity(self):
__out = StringIO.StringIO()
self.generator.set_output(__out)
self.generator.internal_general_entity(u"name", u"value")
self.assertEqual(__out.getvalue(),
u"""<!ENTITY name "value">\n""")
def test_external_parameter_entity_system(self):
__out = StringIO.StringIO()
self.generator.set_output(__out)
self.generator.external_parameter_entity(u"name", u"system")
self.assertEqual(__out.getvalue(),
u"""<!ENTITY % name SYSTEM "system">\n""")
def test_external_parameter_entity_public(self):
__out = StringIO.StringIO()
self.generator.set_output(__out)
self.generator.external_parameter_entity(u"name", u"system", u"public")
self.assertEqual(__out.getvalue(),
u"""<!ENTITY % name\n PUBLIC "public"\n "system">\n""")
def test_internal_parameter_entity(self):
__out = StringIO.StringIO()
self.generator.set_output(__out)
self.generator.internal_parameter_entity(u"name", u"value")
self.assertEqual(__out.getvalue(),
u"""<!ENTITY % name "value">\n""")
def test_element_declaration(self):
__out = StringIO.StringIO()
self.generator.set_output(__out)
self.generator.element_declaration(u"name", u"model")
self.assertEqual(__out.getvalue(),
u"""<!ELEMENT name (model)>\n""")
def test_attribute_declaration(self):
__out = StringIO.StringIO()
self.generator.set_output(__out)
self.generator.attribute_declaration(u"name", u"attrs")
self.assertEqual(__out.getvalue(),
u"""<!ATTLIST name attrs>\n""")
def test_parameter_entity_ref(self):
__out = StringIO.StringIO()
self.generator.set_output(__out)
self.generator.parameter_entity_ref(u"name")
self.assertEqual(__out.getvalue(),
u"%name;")
def test_unique(self):
pairs = [
(["a", "b", "c"],
["a", "b", "c"]),
(["a", "b", "a"],
["a", "b"]),
(["a", "a", "a"],
["a"])
]
for i in pairs:
self.assertEqual(ditagen.generator.unique(i[0]), i[1])
class DitaGeneratorTestCase(unittest.TestCase):
def setUp(self):
self.generator = ditagen.generator.DitaGenerator()
def tearDown(self):
self.generator = None
def test_set_topic_type(self):
        self.assertRaises(AssertionError, self.generator.set_topic_type, "type_topic")
if __name__ == '__main__':
unittest.main()
```
#### File: src/ditagen/web.py
```python
import sys
import cgitb; cgitb.enable()
import ditagen.dita
import ditagen.dtdgen
import ditagen.dita.v1_1
import ditagen.dita.v1_2
import ditagen.generator
def print_error(__msg):
print_response_headers(None, 500, __msg)
print __msg
sys.exit()
def print_response_headers(__file_name, __code=200, __msg="Ok"):
print u"Status: %d %s" % (__code, __msg)
print u"Content-Type: text/plain; charset=UTF-8"
# print u"Content-disposition: attachment; file_name=%s.%s" % (__root, __f)
#print u"Content-disposition: file_name=%s" % __file_name #__dita.getfileName(__type, __root, __f)
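    # the blank line below terminates the CGI response headers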
print
def main(form):
"""Main method."""
__topic_type = None
__output_type = None
__id = None
__root = None
__owner = None
__nested = None
#__remove = {}
#__global_atts = None
__format = None
__domains = []
#__types = []
__version = "1.1"
    __plugin_name = None
    __plugin_version = None
__stylesheet = None
__title = None
__file = None
try:
# read arguments
if u"version" in form:
__version = form.getfirst(u"version")
if __version not in ("1.1", "1.2"):
raise ValueError()
else:
print_error("version missing")
# get domains
for __d in form.getlist(u"domain"):
if __d in ditagen.DOMAIN_MAP[__version]:
__domains.append(ditagen.DOMAIN_MAP[__version][__d]())
# get type
__t = form.getfirst(u"type")
if __t in ditagen.TOPIC_MAP[__version]:
__topic_type = ditagen.TOPIC_MAP[__version][__t]()
__o = form.getfirst(u"output")
if __o in ditagen.OUTPUT_MAP:
__output_type = ditagen.OUTPUT_MAP[__o]
# get arguments
if u"id" in form:
__id = form.getfirst(u"id")
else:
print_error("id missing")
if u"root" in form:
__root = form.getfirst(u"root")
if u"owner" in form:
__owner = form.getfirst(u"owner")
else:
print_error("owner missing")
if u"title" in form:
__title = form.getfirst(u"title")
else:
print_error("title missing")
#if not __title:
# __title = __id.capitalize()
__nested = u"nested" in form
#__remove = dict([(n, True) for n in form.getlist("remove")])
#__global_atts = None#form.getfirst(u"attribute")
if u"file" in form:
__format = form.getfirst(u"file")
else:
print_error("file missing")
__stylesheet = form.getfirst(u"stylesheet")
__file = __id
#if __id is not None:
__topic_type = __output_type(__id, __title, __topic_type,
__owner, __file)#__root
if __topic_type == ditagen.dita.SpecializationType:
__topic_type.root = ditagen.dita.create_element(__topic_type, __root, __id)
# else would be reshelling
except:
#print u"HTTP/1.1 400 Invalid arguments"
#print
raise
# run generator
    if __format == u"plugin" or not __format:
__dita_gen = ditagen.generator.PluginGenerator()
__dita_gen.out = sys.stdout
__dita_gen.topic_type = __topic_type
if not len(__domains) == 0:
__dita_gen.domains = __domains
__dita_gen.nested = __nested
__dita_gen.version = __version
#__dita_gen.set_title(__title)
if __stylesheet:
__dita_gen.set_stylesheet(__stylesheet)
if __plugin_name != None:
__dita_gen.plugin_name = __plugin_name
if __plugin_version != None:
__dita_gen.plugin_version = __plugin_version
__file_name = __dita_gen.get_file_name(__topic_type, __file, "zip")
print u"Status: 200 Ok"
#print u"Content-type: application/zip"
print u"Content-disposition: attachment; filename={0}".format(__file_name)
print
__dita_gen.generate_plugin()
else:
__dita_gen = ditagen.generator.DitaGenerator()
__dita_gen.out = sys.stdout
__dita_gen.topic_type = __topic_type
if not len(__domains) == 0:
__dita_gen.domains = __domains
__dita_gen.nested = __nested
__dita_gen.version = __version
__file_name = __dita_gen.get_file_name(__topic_type, __file, __format)
print_response_headers(__file_name)
if __format == u"dtd":
__dita_gen.generate_dtd()
elif __format == u"mod":
__dita_gen.generate_mod()
elif __format == u"ent":
__dita_gen.generate_ent()
if __name__ == "__main__":
main()
```
|
{
"source": "Jelso13/DancingLinksSudoku",
"score": 3
}
|
#### File: Jelso13/DancingLinksSudoku/solve.py
```python
import numpy as np
import sys
from project.Sudoku import Sudoku
def sudoku_solver(sudoku):
sudoku = np.array(sudoku)
s = Sudoku()
solExample, fExample = s.solve(sudoku.astype(int))
x = s.returnSol(solExample, fExample)
return x
if __name__ == "__main__":
args = sys.argv[1:]
if len(args) < 1:
print("Please provide a sudoku as a file or plain text...")
else:
for arg in args:
if arg[-4:] == ".txt":
s = []
with open(arg, "r") as f:
for line in f.readlines():
                    s.append([int(c) for c in line if c.isdigit()])
print(sudoku_solver(np.array(s)))
else:
print(sudoku_solver(np.reshape(np.array([int(c) for c in arg if c.isdigit()]), (-1, 9))))
print()
```
|
{
"source": "jelson/aqi",
"score": 3
}
|
#### File: aqi/v1/graph-data.py
```python
import pandas
import sys
import os
import json
import aqi
import datetime
import psycopg2
import psycopg2.extras
import matplotlib.patches as patches
def get_data(filename):
lines = open(filename).readlines()
lines = map(lambda x: x.rstrip(), lines)
def get_fields(line):
l = line.split(']')
retval = {}
retval['date'] = l[0][1:]
retval.update(json.loads(l[1]))
return retval
    lines = [get_fields(line) for line in lines]
df = pandas.DataFrame(lines)
df['date'] = pandas.to_datetime(
df['date'],
format='%d/%m/%y - %H:%M:%S:%f',
)
df = df.set_index('date')
df = df.dropna()
df['aqi'] = df['pm2.5'].apply(lambda x: float(aqi.to_iaqi(
aqi.POLLUTANT_PM25, x, algo=aqi.ALGO_EPA)))
print(df)
return df
def insert(df):
conn = psycopg2.connect(database="airquality")
cursor = conn.cursor()
now = datetime.datetime.now()
data = [
(now - datetime.timedelta(seconds=10), 10, 25, 100, 1000),
(now - datetime.timedelta(seconds=9), 11, 26, 101, 1001),
(now - datetime.timedelta(seconds=8), 12, 27, 102, 1002),
(now - datetime.timedelta(seconds=7), 13, 28, 103, 1003),
]
insert_query = 'insert into particulate (time, pm10, pm25, pm100, aqi) values %s'
df = df.copy()
df.reset_index(inplace=True)
psycopg2.extras.execute_values(
cursor,
insert_query,
df.values,
template=None,
)
conn.commit()
def graph(df, filename):
df = df.rolling(60).mean()
plot = df[['pm1.0', 'pm2.5', 'pm10.0']].plot(grid=True, figsize=(20, 10))
plot.set_title("Jer - Particulate Concentrations\n60-second rolling avg of 1hz data")
plot.set_xlabel("date/time")
plot.set_ylabel("ug/m3")
plotname = os.path.splitext(filename)[0] + ".png"
plot.figure.savefig(plotname)
plot = df[['aqi']].plot(grid=True, figsize=(20, 10))
plot.set_title("Jer - AQI from PM2.5\n60-second rolling avg of 1hz data")
plot.set_ylim(bottom=0)
plot.set_xlabel("date/time")
plot.set_ylabel("AQI from PM2.5")
rec = patches.Rectangle((0, 0), width=2000, height=50, fill=True,facecolor='green')
plot.add_patch(rec)
plotname = os.path.splitext(filename)[0] + ".aqi.png"
plot.figure.savefig(plotname)
def main():
filename = sys.argv[1]
df = get_data(filename)
insert(df)
if __name__ == "__main__":
main()
```
#### File: aqi/v2/liveread.py
```python
import aqi
import argparse
import datetime
import json
import psycopg2
import psycopg2.extras
import sys
MAX_CACHE_SIZE = 30
logfile = sys.stdout
def say(s):
if logfile:
logfile.write(s)
logfile.write("\n")
def insert_batch(db, data):
sys.stderr.write(f"inserting {len(data)} records\n")
insert_query = 'insert into particulate (time, pm10, pm25, pm100, aqi) values %s'
cursor = db.cursor()
psycopg2.extras.execute_values(
cursor,
insert_query,
data,
template=None,
)
db.commit()
def line_arrived(cache, db, t, line):
data = json.loads(line)
printable_data = data.copy()
printable_data['time'] = t.timestamp()
printable_data['ftime'] = t.strftime("%Y-%m-%d %H:%M:%S.%f")
say(json.dumps(printable_data))
sys.stdout.flush()
data['time'] = t
data['aqi'] = int(aqi.to_iaqi(
aqi.POLLUTANT_PM25,
data['pm2.5'],
algo=aqi.ALGO_EPA))
db_record = [
data['time'],
data['pm1.0'],
data['pm2.5'],
data['pm10.0'],
data['aqi'],
]
cache.append(db_record)
if len(cache) >= MAX_CACHE_SIZE:
insert_batch(db, cache)
cache.clear()
def read_forever(db, f):
cache = []
while True:
line = f.readline()
if not line:
say("Got EOF! Terminating")
return
line = line.rstrip()
if line:
line_arrived(cache, db, datetime.datetime.now(), line)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-p", "--port",
help="Port to read from",
action='store',
        required=True,
)
parser.add_argument(
"-l", "--log",
help='Filename to log to',
action='store'
)
args = parser.parse_args()
say(f"Starting; args: {args}")
if args.log:
global logfile
logfile = open(args.log, "a")
infile = open(args.port, "r")
say("Opened file")
db = psycopg2.connect(database="airquality")
read_forever(db, infile)
say("Read failed!")
main()
```
#### File: v3/client/datacache.py
```python
import os
import sys
import threading
import time
# project libraries
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from common.mylogging import say
import httpclient
class DataCache(threading.Thread):
def __init__(self, args):
threading.Thread.__init__(self)
self.args = args
self.daemon = True
self.cache = []
self.client = httpclient.DataClient(args)
self.lock = threading.Lock()
self.start()
def append(self, record):
with self.lock:
self.cache.append(record)
if self.args.verbose:
say(f"got record: {record}")
def set_send_callback(self, cb):
self.client.set_send_callback(cb)
def run(self):
to_xmit = []
while True:
# Move any records in the cache into a local variable to transmit to
# the server
with self.lock:
to_xmit.extend(self.cache)
self.cache.clear()
# If there is anything to transmit, try to send them to the server
if len(to_xmit) > 0:
# Try to send the locally stored records to the server
retval = self.client.insert_batch(to_xmit)
# If the send was successful, discard these records. Otherwise, save
# them so we can try to send them again next time around.
if retval:
to_xmit.clear()
# Wait until it's time to transmit again
time.sleep(15)
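
# Hedged usage sketch (not part of the original module; the attributes on
# `args` and the record layout are assumptions inferred from how append() and
# httpclient.DataClient(args) are used above):
#
#   cache = DataCache(args)                       # starts the daemon thread
#   cache.append([timestamp, pm10, pm25, pm100])  # cheap, lock-protected
#
# The background run() loop drains the cache roughly every 15 seconds and
# keeps unsent batches around, so a failed upload is retried on the next pass.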
```
#### File: v3/integrations/aqi-fan-integration.py
```python
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GLib
import dbus
import os
import psycopg2
import sys
# project libraries
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import server.pms5003db as pms5003db
from common.mylogging import say
import nest_controller
import tplink_controller
CONFIG = [
{
'sensorname': 'jer-office',
'datatype': 'aqi2.5',
'on-thresh': 35,
'off-thresh': 10,
'averaging-sec': 60,
'onoff-func': nest_controller.NestController('Jer Entryway').fan_control,
},
{
'sensorname': 'jer-bedroom',
'datatype': 'aqi2.5',
'on-thresh': 35,
'off-thresh': 10,
'averaging-sec': 60,
'onoff-func': nest_controller.NestController('Jer Bedroom').fan_control,
},
{
'sensorname': 'gracie-bedroom',
'datatype': 'aqi2.5',
'on-thresh': 35,
'off-thresh': 10,
'averaging-sec': 60,
'onoff-func': tplink_controller.TPLinkController('Gracie Fan').set_plug_state,
},
]
class AQIChangeHandler:
def __init__(self, bus):
bus.add_signal_receiver(
self.NewDataAvailable,
path='/org/lectrobox/aqi',
signal_name='NewDataAvailable',
)
self.pmsdb = pms5003db.PMS5003Database()
# dbus signal handler
def NewDataAvailable(self, *args, **kwargs):
argdict = dict(args[0])
sensorname = str(argdict['sensorname'])
for c in CONFIG:
if c['sensorname'] == sensorname:
self.maybe_on_off(c)
def maybe_on_off(self, c):
aqi = self.get_oneminute_average(c)
say(f"sensor {c['sensorname']} aqi now {aqi}")
fan_is_on = c.get('fan-is-on', False)
if fan_is_on and aqi <= c['off-thresh']:
self.change_fan_state(c, False)
elif (not fan_is_on) and aqi >= c['on-thresh']:
self.change_fan_state(c, True)
def get_oneminute_average(self, c):
db = self.pmsdb.get_raw_db()
cursor = db.cursor()
cursor.execute(
"""
select
avg("value") from sensordatav4_tsdb
where
sensorid=%s and
datatype=%s and
time > now() - interval '%s seconds'""", (
self.pmsdb.get_sensorid_by_name(c['sensorname']),
self.pmsdb.get_datatype_by_name(c['datatype']),
c['averaging-sec'])
)
row = cursor.fetchone()
# end the transaction - otherwise, the value of now() never changes
db.commit()
return(row[0])
def change_fan_state(self, c, onoff):
c['fan-is-on'] = onoff
say(f'Sensor {c["sensorname"]} tripped over threshold')
c['onoff-func'](onoff)
def main():
DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
ch = AQIChangeHandler(bus)
loop = GLib.MainLoop()
loop.run()
main()
```
#### File: v3/integrations/tplink_controller.py
```python
from tplinkcloud import TPLinkDeviceManager
import asyncio
import os
import pathlib
import sys
import yaml
# project libraries
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
from common.mylogging import say
class TPLinkController():
def __init__(self, device_name):
config_fn = os.path.join(
pathlib.Path.home(),
".config", "aqi", "tplink-client.yml")
config = yaml.safe_load(open(config_fn, "r"))
        if device_name not in config['devices']:
            raise ValueError(f"No such device '{device_name}' in config file '{config_fn}'")
self.config = config['devices'][device_name]
async def async_set_plug_state(self, onoff):
say(f"Trying to set {self.config['username']}, device {self.config['device_name']} to {onoff}")
device_manager = TPLinkDeviceManager(self.config['username'], self.config['password'])
device = await device_manager.find_device(self.config['device_name'])
if not device:
raise Exception(f"Could not find {self.config['device_name']}")
if onoff:
await device.power_on()
else:
await device.power_off()
def set_plug_state(self, onoff):
asyncio.run(self.async_set_plug_state(onoff))
if __name__ == '__main__':
gracie_fan = TPLinkController("Gracie Fan")
gracie_fan.set_plug_state(False)
```
|
{
"source": "jelszo-co/novel",
"score": 2
}
|
#### File: backend/authorization/views.py
```python
import json
from django.http import JsonResponse
from django.utils.decorators import method_decorator
from django.views import View
from authorization.decorator import permission_needed
class UserView(View):
def get(self, request, *args, **kwargs):
user = request.fb_user
print(user)
return JsonResponse({
'stranger': not user.isAuthenticated,
'anonim': user.isAnonymous,
'authenticated': user.isAuthenticated and not user.isAnonymous and not user.isAdmin,
'admin': user.isAdmin,
'name': user.name
})
@method_decorator(permission_needed(lambda request: request.fb_user.isAnonymous, 'Log in to change your name',
'Anonymous accounts can\'t change name'))
def put(self, request, *args, **kwargs):
decoded = json.loads(request.body)
if 'name' not in decoded or type(decoded['name']) != str:
return JsonResponse({'error': 'name parameter not found'}, status=400)
user = request.fb_user
user.name = decoded['name']
user.save()
return JsonResponse({'name': user.name})
@method_decorator(permission_needed(lambda request: request.fb_user.isAnonymous, 'Log in to delete your account',
'Anonymous accounts can\'t delete themselves'))
def delete(self, request, *args, **kwargs):
request.fb_user.delete()
return JsonResponse({'success': 'Deleted successfully'})
```
|
{
"source": "JelteF/bottor",
"score": 3
}
|
#### File: app/controllers/peer.py
```python
from app import db
from app.models import Peer
from datetime import datetime
class PeerController:
@staticmethod
def create(peer):
"""Create peer by peer dict."""
if type(peer) is dict:
peer = Peer.new_dict(peer)
else:
peer = Peer(peer)
db.session.add(peer)
db.session.commit()
return peer
@staticmethod
def get(peer_id):
"""Get peer by id."""
return Peer.query.get(peer_id)
@staticmethod
def get_all():
"""Get all peers."""
return Peer.query.all()
@staticmethod
def delete(peer):
""" Delete peer item """
db.session.delete(peer)
db.session.commit()
@staticmethod
def ping(peer_id, CPU):
peer = PeerController.get(peer_id)
peer.active = True
peer.last_active = datetime.now()
peer.CPU = CPU
db.session.add(peer)
db.session.commit()
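
# Hedged usage sketch (illustrative only; it assumes a Flask app context is
# active and that the Peer model accepts the given fields):
#
#   peer = PeerController.create({'name': 'worker-1'})
#   PeerController.ping(peer.id, CPU=0.42)   # mark active and record load
#   PeerController.get_all()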
```
#### File: tracker/app/__init__.py
```python
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
# Startup stuff
app = Flask(__name__)
app.config.from_object('config')
# This is the path to the upload directory
app.config['UPLOAD_FOLDER'] = 'app/static/matrix/'
# These are the extension that we are accepting to be uploaded
app.config['ALLOWED_EXTENSIONS'] = set(['txt'])
# code for clienthandshake
app.config['CLIENT_HANDSHAKE'] = 'ILIKETURTLES'
app.config['active_time'] = 120
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
db = SQLAlchemy(app)
# Register blueprints
from app.api import *
from app.views.views import views_blueprint
from app.views.login import login_blueprint
app.register_blueprint(upload_api)
app.register_blueprint(peer_api)
app.register_blueprint(matrix_api)
app.register_blueprint(views_blueprint)
app.register_blueprint(login_blueprint)
app.register_blueprint(task_api)
app.register_blueprint(job_api)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.htm'), 404
# Add methods and modules to jinja environment
from app.utils import serialize_sqla
import json
app.jinja_env.globals.update(json=json)
app.jinja_env.globals.update(serialize_sqla=serialize_sqla)
app.jinja_env.globals.update(len=len)
```
#### File: app/utils/base_model.py
```python
from app import db
from app.utils import serialize_sqla
from datetime import datetime
import dateutil.parser
class BaseEntity(object):
__table_args__ = {'sqlite_autoincrement': True}
# Only json items if explicitly defined, and just print id when not
# defined.
jsons = None
json_relationships = None
prints = ('id',)
# Columns that every model needs.
id = db.Column(db.Integer, primary_key=True)
created = db.Column(db.DateTime, default=datetime.now)
modified = db.Column(db.DateTime, default=datetime.now,
onupdate=datetime.now)
def __repr__(self):
"""Used by print to print a model at server side. It uses the prints
attribute from the object to determine what values to print."""
first = True
string = '<%s(' % (type(self).__name__)
for attr in self.prints:
string += (', ' if not first else '') + '"%s"' % (getattr(self,
attr))
first = False
string += ')>'
return string
# Function to
def to_dict(self):
"""Convert a sqlalchemy object instance to a dictionary.
This is needed for json serialization of an object. The jsons attribute
is used to determine what values to serialize (password hashes and such
        should not be in there).
"""
attrs = {}
set_jsons = False
if not self.jsons:
self.jsons = (column.name for column in self.__table__.columns)
set_jsons = True
for column in self.jsons:
value = serialize_sqla(getattr(self, column))
attrs[column] = value
if self.json_relationships:
for rel in self.json_relationships:
attrs[rel] = serialize_sqla(getattr(self, rel).all())
if set_jsons:
self.jsons = None
return attrs
@classmethod
def merge_dict(cls, obj, relationships={}):
"""Merge dictionary as object."""
# Get the correct entry from the database.
if 'id' in obj and obj['id']:
entry = cls.by_id(obj['id'])
if not entry:
return None
# If the dict doesn't contain id it means the entry does not exist yet.
else:
entry = cls()
# Remove id, created and modified, since those are things you want to
# automaticaly update.
obj.pop('id', None)
obj.pop('created', None)
obj.pop('modified', None)
column_names = tuple(column.name for column in cls.__table__.columns)
# Update all values from the dict that exist as a column or a
# relationship.
for key, value in obj.items():
if key in column_names:
columntype = str(cls.__table__.columns[key].type)
if columntype == 'DATE' and value is not None:
if isinstance(value, str):
value = dateutil.parser.parse(value)
elif columntype == 'TIME' and value is not None:
if isinstance(value, str):
value = dateutil.parser.parse(value).time()
setattr(entry, key, value)
elif key in relationships:
setattr(entry, key, relationships[key].by_ids(value))
return entry
# For future proofing use new_dict when creating new entries, so it could
# become a separate function if needed.
new_dict = merge_dict
@classmethod
def by_id(cls, _id):
"""Get entry by id."""
return cls.query.filter_by(id=_id).first()
@classmethod
def by_ids(cls, ids):
"""Get entries by id list."""
try:
return db.session.query(cls).filter(cls.id.in_(ids)).all()
except:
return []
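
# Hedged usage sketch (assumption, not part of the original file): concrete
# models mix BaseEntity into a SQLAlchemy model so they inherit the
# id/created/modified columns plus the to_dict/merge_dict helpers, e.g.:
#
#   class Example(db.Model, BaseEntity):
#       name = db.Column(db.String(64))
#       jsons = ('id', 'name', 'created')
#
#   entry = Example.new_dict({'name': 'demo'})  # unsaved instance
#   entry.to_dict()                             # JSON-safe dict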
```
#### File: app/utils/__init__.py
```python
def serialize_sqla(data):
"""Serialiation function to serialize any dicts or lists containing
sqlalchemy objects. This is needed for conversion to JSON format."""
# If has to_dict this is asumed working and it is used.
if hasattr(data, 'to_dict'):
return data.to_dict()
if hasattr(data, '__dict__'):
return data.__dict__
# DateTime objects should be returned as isoformat.
if hasattr(data, 'isoformat'):
return str(data.isoformat())
    # Items in lists are iterated over and get serialized separately.
if isinstance(data, (list, tuple, set)):
return [serialize_sqla(item) for item in data]
# Dictionaries get iterated over.
if isinstance(data, dict):
result = {}
for key, value in data.items():
result[key] = serialize_sqla(value)
return result
# Just hope it works.
return data
def row2dict(row):
if(not row):
return None
d = {}
for column in row.__table__.columns:
d[column.name] = getattr(row, column.name)
return d
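
# Hedged example (illustrative only): serialize_sqla recurses through plain
# containers, so a mixed structure of model instances, datetimes and lists
# comes back as JSON-ready primitives, roughly:
#
#   serialize_sqla({'when': some_datetime, 'rows': [some_model, 1, 'a']})
#   # -> {'when': '2015-01-01T12:00:00', 'rows': [{...}, 1, 'a']}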
```
#### File: bottor/tracker/tracker_test.py
```python
import unittest
import os
import json
import time
from glob import glob
from app import app, db
from app.models.matrix import Matrix
from app.controllers.matrix import MatrixController
from app.controllers.job import JobController
from app.controllers.taskmanager import TaskManager
from app.controllers.task import TaskController
class TrackerTestCase(unittest.TestCase):
def create_app(self):
return app
def setUp(self):
filelist = glob("app/*.sqlite")
filelist += (glob("app/*.db"))
for f in filelist:
os.remove(f)
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
# def testMatrixController(self):
# matrix = MatrixController.createFromFile("sample_matrices/A50")
# assert matrix
# matrix3 = MatrixController.createFromArray(Matrix.matrices[matrix.id])
# assert matrix3
# MatrixController.setCell(matrix3, 0, 0, 0)
# MatrixController.writeToFile(matrix3, "")
# row = MatrixController.getRow(matrix3, 0)
# column = MatrixController.getColumn(matrix3, 0)
# matrix2 = MatrixController.createEmptyMatrix(20, 20, 0)
# assert matrix2
# MatrixController.writeToFile(matrix2)
# MatrixController.writeToFile(matrix2, "result_matrices/BLABLA")
# def testJobController(self):
# matrixA = "sample_matrices/A20"
# matrixB = "sample_matrices/B20"
# job = JobController.create(matrixA, matrixB)
# assert job
# array = Matrix.matrices[job.getTaskMatrix()]
# MatrixController.writeArrayToFile(array, "result_matrices/test3a")
# task1 = JobController.getTask(job, 1)
# assert task1
# print (job.running)
# task2 = JobController.getTask(job, 1)
# assert task2
# job2 = JobController.getJobWithFreeTask()
# assert job2
# task3 = JobController.getTask(job, 1)
# assert task3
# resArray = Matrix.matrices[job2.getTaskMatrix()]
# MatrixController.writeArrayToFile(array, "result_matrices/test3b")
# def testTaskManager(self):
# matrixA = "sample_matrices/A20"
# matrixB = "sample_matrices/B20"
# job6 = JobController.create(matrixA, matrixB)
# assert job6
# job2 = JobController.create(matrixA, matrixB)
# task1 = TaskManager.getTask(1)
# while task1:
# job = JobController.get(task1.job)
# array = Matrix.matrices[job.getTaskMatrix()]
# MatrixController.writeArrayToFile(array, "result_matrices/testTaskManager" + str(job.id))
# task1 = TaskManager.getTask(1)
# TaskController.cancelTask(2)
# array = Matrix.matrices[job6.getTaskMatrix()]
# MatrixController.writeArrayToFile(array, "result_matrices/testTaskManager" + str(job6.id) + "b")
# print (job6.running)
# print (job6.free)
def testShizzle(self):
matrixA = "sample_matrices/A20"
matrixB = "sample_matrices/B20"
job = JobController.create(matrixA, matrixB)
job2 = JobController.create(matrixA, matrixB)
task1 = TaskManager.getTask(1)
assert task1
task2 = TaskManager.getTask(1)
assert task2
array = Matrix.matrices[job.getTaskMatrix()]
MatrixController.writeArrayToFile(array, "result_matrices/test_job_cancelling_1")
time.sleep(2)
task3 = TaskManager.getTask(1)
assert task3
task4 = TaskManager.getTask(1)
MatrixController.writeArrayToFile(array, "result_matrices/test_job_cancelling_2")
TaskManager.cancelLongRunningTasks()
MatrixController.writeArrayToFile(array, "result_matrices/test_job_cancelling_3")
# def testShit(self):
# matrixB = MatrixController.createFromFile("sample_matrices/B20")
# transposed = MatrixController.transpose(matrixB)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JelteF/easyfuse",
"score": 2
}
|
#### File: easyfuse/easyfuse/utils.py
```python
import llfuse
from llfuse import FUSEError
from contextlib import contextmanager
import errno
import logging
import traceback
import os
@contextmanager
def _convert_error_to_fuse_error(action, thing):
try:
yield
except Exception as e:
if isinstance(e, FUSEError):
raise e
logging.error('Something went wrong when %s %s: %s', action, thing, e)
if logging.getLogger().isEnabledFor(logging.DEBUG):
# DEBUG logging, print stacktrace
traceback.print_exc()
raise FUSEError(errno.EAGAIN)
def mount(operations, mountpoint, options=None, *,
override_default_options=False, workers=30):
"""Mount a file system.
Args
----
operations: `~.Operations`
The operations handler for the file system.
mountpoint: str
The directory on which the file system should be mounted.
options: set
A set of options that should be used when mounting.
override_default_options: bool
If this is set to `True` only the supplied options will be used.
Otherwise the options will be added to the defaults. The defaults are
the defaults supplied by `llfuse.default_options`.
workers: int
The amount of worker threads that should be spawned to handle the file
operations.
"""
operations.mountpoint = os.path.abspath(mountpoint)
if options is None:
options = llfuse.default_options
elif not override_default_options:
options |= llfuse.default_options
llfuse.init(operations, mountpoint, options)
try:
llfuse.main(workers=workers)
finally:
llfuse.close()
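
# Hedged usage sketch (assumption; MyOperations is a hypothetical subclass of
# this package's Operations handler, and the mountpoint is illustrative):
#
#   mount(MyOperations(), '/mnt/example', options={'fsname=easyfuse'})
#
# Extra options are merged into llfuse.default_options unless
# override_default_options=True is passed, and llfuse.close() always runs so
# the mount is cleaned up even if the worker loop raises.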
```
|
{
"source": "JelteF/statistics",
"score": 3
}
|
#### File: statistics/3/confidence.py
```python
from random import sample
from numpy import std
from math import sqrt
TEST_CASE_N = 100
TEST_DATA_N = 50
C = 2.009
ALPHA = 0.05
def main():
with open('tijden-medium.log', 'r') as f:
data = []
for line in f:
data.append(float(line.rstrip('\n')))
confidence(data)
def confidence(data):
total_mu = sum(data) / len(data)
hits = 0
for i in range(TEST_CASE_N):
if test_case(data, total_mu):
hits += 1
print('Estimated chance: %f' % (1-ALPHA))
print('Hits: %d, misses: %d, calculated chance: %f' % (hits,
TEST_CASE_N-hits,
hits/TEST_CASE_N))
def test_case(data, mu):
test_data = sample(data, TEST_DATA_N)
x_avg = sum(test_data) / TEST_DATA_N
    s = std(test_data, ddof=1)  # sample standard deviation for the t statistic
t = (x_avg - mu) / (s / sqrt(TEST_DATA_N))
hit = t >= -C and t <= C
    print(r'$%f \leq %f \leq %f$ & %s \\' % (-C, t, C,
                                             'within interval' if hit
                                             else 'outside interval'))
return hit
if __name__ == '__main__':
main()
```
#### File: statistics/3/ibm_rng.py
```python
def ibm_rng(x1, a=65539, c=0, m=2**31):
x = x1
while True:
x = (a * x + c) % m
yield x / (m-1)
def main():
rng = ibm_rng(1, 65539, 0, 2**31)
while True:
x = next(rng)
print(x)
if __name__ == '__main__':
main()
```
#### File: statistics/5/minerr_2.py
```python
from numpy import arange, loadtxt, \
zeros, sum, array, logical_and
import matplotlib.pyplot as plt
import numpy.random as rd
import numpy as np
import pandas as pd
from scipy.stats import multivariate_normal
def cnvt(s):
tab = {'Iris-setosa': 1.0, 'Iris-versicolor': 2.0, 'Iris-virginica': 3.0}
s = s.decode()
if s in tab:
return tab[s]
else:
return -1.0
DATA = loadtxt('data/iris.data', delimiter=',', dtype=float,
converters={4: cnvt})
def main():
seed = rd.randint(10000)
n = 1
accuracies = np.zeros((n, ))
for i in range(n):
accuracies[i] = do_minerr(seed, plot=False, print_=True)
seed += 1
mean_accuracy = np.mean(accuracies)
print('The accuracy is: ', mean_accuracy * 100, '%', sep='')
class MinError():
def __init__(self, X):
X = pd.DataFrame(X)
self.pdfs = {}
self.class_chances = {}
for name, g in X.groupby(X.columns[-1]):
data = g.as_matrix()[:, :-1]
mean = data.mean(axis=0)
cov = np.cov(data.T)
self.pdfs[name] = multivariate_normal(mean=mean, cov=cov).pdf
self.class_chances[name] = len(g) / len(X)
def classify(self, x):
best_class = None
best_chance = 0
for cls, pdf in self.pdfs.items():
chance = pdf(x) * self.class_chances[cls]
if chance > best_chance:
best_chance = chance
best_class = cls
return best_class
def do_minerr(seed=None, plot=True, print_=True):
if seed is not None:
rd.seed(seed)
ind = arange(150) # indices into the dataset
ind = rd.permutation(ind) # random permutation
L = ind[0:90] # learning set indices
T = ind[90:] # test set indices
# Learning Set
X = DATA[L, :]
classifier = MinError(X)
# Classification of Test Set
c = zeros(len(T))
for i in arange(len(T)):
c[i] = classifier.classify(DATA[T[i], 0:4])
# Confusion Matrix
CM = zeros((3, 3))
for i in range(3):
for j in range(3):
CM[i, j] = sum(logical_and(DATA[T, -1] == (i+1), c == (j+1)))
if print_:
print(CM)
if plot:
plot_stuff(T, c)
return np.sum(c == DATA[T, -1]) / len(DATA[T])
def plot_stuff(T, c):
# Plot Test Set
plt.figure(1)
color = array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
for i in range(4):
for j in range(4):
plt.subplot(4, 4, 4*i + j + 1)
if i == j:
continue
plt.scatter(DATA[T, i], DATA[T, j], s=100, marker='s',
edgecolor=color[DATA[T, 4].astype(int)-1],
facecolor=[[1, 1, 1]] * len(T))
plt.scatter(DATA[T, i], DATA[T, j], s=30, marker='+',
edgecolor=color[c.astype(int)-1])
plt.show()
if __name__ == '__main__':
main()
```
#### File: statistics/5/nnb.py
```python
from numpy import tile, sum, argmin
import numpy as np
from scipy.stats import mode
class NNb:
def __init__(self, X, c):
self.n, self.N = X.shape
self.X = X
self.c = c
def classify(self, x):
d = self.X - tile(x.reshape(self.n, 1), self.N)
dsq = sum(d * d, 0)
minindex = argmin(dsq)
return self.c[minindex]
class kNNb:
def __init__(self, X, c):
self.n, self.N = X.shape
self.X = X
self.c = c
def classify(self, x, k):
d = self.X - tile(x.reshape(self.n, 1), self.N)
dsq = sum(d * d, 0)
best_k_i = np.argsort(dsq, axis=0)[:k]
best_k = self.c[best_k_i]
return mode(best_k)[0][0]
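
# Hedged usage sketch (the shapes follow from how the classes index their
# inputs): X is an (n_features, n_samples) array and c holds the matching
# class labels, so
#
#   knn = kNNb(X, c)
#   knn.classify(x, k=3)
#
# returns the most common label among the 3 training samples closest to x
# (see run_knnb.py for the full experiment).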
```
#### File: statistics/5/run_knnb.py
```python
from numpy import arange, loadtxt, transpose,\
zeros, sum, array, logical_and
import matplotlib.pyplot as plt
import numpy.random as rd
from nnb import kNNb
import numpy as np
def cnvt(s):
tab = {'Iris-setosa': 1.0, 'Iris-versicolor': 2.0, 'Iris-virginica': 3.0}
s = s.decode()
if s in tab:
return tab[s]
else:
return -1.0
XC = loadtxt('data/iris.data', delimiter=',', dtype=float,
converters={4: cnvt})
def main():
seed = rd.randint(10000)
test_k = range(1, 10, 2)
n = 400
accuracies = np.zeros((n, len(test_k)))
for i in range(n):
for j, k in enumerate(test_k):
accuracies[i, j] = do_knnb(k, seed, plot=False, print_=False)
seed += 1
mean_accuracies = np.mean(accuracies, axis=0)
    print("Accuracy for each k from 1 to 9")
for k, a in zip(test_k, mean_accuracies):
print(k, '%f%%' % (a * 100), sep=': ')
def do_knnb(k, seed=None, plot=True, print_=True):
if seed is not None:
rd.seed(seed)
ind = arange(150) # indices into the dataset
ind = rd.permutation(ind) # random permutation
L = ind[0:90] # learning set indices
T = ind[90:] # test set indices
# Learning Set
X = transpose(XC[L, 0:-1])
nnc = kNNb(X, XC[L, -1])
# Classification of Test Set
c = zeros(len(T))
for i in arange(len(T)):
c[i] = nnc.classify(XC[T[i], 0:4], k)
# Confusion Matrix
CM = zeros((3, 3))
for i in range(3):
for j in range(3):
CM[i, j] = sum(logical_and(XC[T, -1] == (i+1), c == (j+1)))
if print_:
print(k)
print(CM)
if plot:
plot_stuff(T, c)
return np.sum(c == XC[T, -1]) / len(XC[T])
def plot_stuff(T, c):
# Plot Test Set
plt.figure(1)
color = array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
for i in range(4):
for j in range(4):
plt.subplot(4, 4, 4*i + j + 1)
if i == j:
continue
plt.scatter(XC[T, i], XC[T, j], s=100, marker='s',
edgecolor=color[XC[T, 4].astype(int)-1],
facecolor=[[1, 1, 1]] * len(T))
plt.scatter(XC[T, i], XC[T, j], s=30, marker='+',
edgecolor=color[c.astype(int)-1])
plt.show()
if __name__ == '__main__':
main()
```
#### File: statistics/6/iris_svm.py
```python
from sklearn import preprocessing
from sklearn import svm
import numpy as np
import numpy.random as rd
def main(seed=None):
data = np.loadtxt('data/iris.data', delimiter=',', dtype=float,
converters={4: cnvt})
# Split data into a matrix X containing all RVs, and y, containing all
# classes.
X = data[:, :4]
y = data[:, 4:].reshape((150, ))
# Scale data.
X_scaled = preprocessing.scale(X)
# Choose learn and test data.
if seed is not None:
rd.seed(seed)
ind = np.arange(150) # indices into the dataset
ind = rd.permutation(ind) # random permutation
L = ind[0:90] # learning set indices
T = ind[90:] # test set indices
# Learning set.
X_learn = X_scaled[L, :]
y_learn = y[L]
# Create SVM.
clf = svm.SVC()
# Fit data.
clf.fit(X_learn, y_learn)
# Test set.
X_test = X_scaled[T, :]
y_test = y[T]
# Test all data.
pred_clss = clf.predict(X_test)
# Create confusion matrix.
cm = np.zeros((3, 3))
    for pred_cls, cls in zip(pred_clss, y_test):
        # Labels are floats, so cast to int before using them as matrix indices.
        cm[int(cls), int(pred_cls)] += 1
print(cm)
def cnvt(s):
tab = {'Iris-setosa': 0.0, 'Iris-versicolor': 1.0, 'Iris-virginica': 2.0}
s = s.decode()
if s in tab:
return tab[s]
else:
return -1.0
if __name__ == '__main__':
main()
```
|